Diffstat (limited to 'drivers')
419 files changed, 13972 insertions, 11601 deletions
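The ACPICA portion of this series converts GPE enabling to reference counting: acpi_raw_enable_gpe()/acpi_raw_disable_gpe() are introduced in evgpe.c and the exported acpi_enable_gpe()/acpi_disable_gpe() in evxfevnt.c become thin wrappers around them. The sketch below (not part of the patch) illustrates how a driver is expected to pair these calls under the new scheme; the device handle, GPE number and function names are hypothetical placeholders.

#include <linux/acpi.h>
#include <linux/errno.h>

/* Hypothetical GPE identity: NULL handle means a GPE in the FADT GPE blocks. */
static acpi_handle my_gpe_device;	/* placeholder, not from this diff */
#define MY_GPE_NUMBER	0x10		/* placeholder GPE number */

static int my_driver_start(void)
{
	acpi_status status;

	/*
	 * Takes a reference on the GPE; the GPE is hardware-enabled only
	 * when the first reference is added (see acpi_raw_enable_gpe below).
	 */
	status = acpi_enable_gpe(my_gpe_device, MY_GPE_NUMBER);
	if (ACPI_FAILURE(status))
		return -ENODEV;
	return 0;
}

static void my_driver_stop(void)
{
	/*
	 * Drops our reference; the GPE is hardware-disabled only when the
	 * last reference goes away (see acpi_raw_disable_gpe below).
	 */
	acpi_disable_gpe(my_gpe_device, MY_GPE_NUMBER);
}

Every successful acpi_enable_gpe() must be balanced by exactly one acpi_disable_gpe(); an unbalanced disable now returns AE_LIMIT instead of silently turning the GPE off for other users.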
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 08e0140920e..b811f2173f6 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -54,17 +54,10 @@ config ACPI_PROCFS they have been replaced by functions in /sys. The deprecated files (and their replacements) include: - /proc/acpi/sleep (/sys/power/state) - /proc/acpi/info (/sys/module/acpi/parameters/acpica_version) - /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT) - /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP) - /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer) - /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level) - /proc/acpi/processor/*/power (/sys/devices/system/cpu/*/cpuidle/*) - /proc/acpi/processor/*/performance (/sys/devices/system/cpu/*/ - cpufreq/*) /proc/acpi/processor/*/throttling (/sys/class/thermal/ cooling_device*/*) + /proc/acpi/video/*/brightness (/sys/class/backlight/) + /proc/acpi/thermal_zone/*/* (/sys/class/thermal/) This option has no effect on /proc/acpi/ files and functions which do not yet exist in /sys. diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 833b582d176..3d031d02e54 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -37,8 +37,9 @@ acpi-y += ec.o acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o acpi-y += power.o -acpi-y += system.o event.o -acpi-$(CONFIG_ACPI_DEBUG) += debug.o +acpi-y += event.o +acpi-y += sysfs.o +acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o ifdef CONFIG_ACPI_VIDEO diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index e0e6affb0d8..36867cd70ea 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -82,6 +82,10 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); +acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); + +acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); + struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 18e796fe429..1d192142c69 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -108,7 +108,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); /* * Optionally enable output from the AML Debug Object. 
*/ -u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE); +u32 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE); /* * Optionally copy the entire DSDT to local memory (instead of simply diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1ee0bcf399a..df85b53a674 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -412,6 +412,7 @@ struct acpi_handler_info { acpi_event_handler address; /* Address of handler, if any */ void *context; /* Context to be passed to handler */ struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ + u8 orig_flags; /* Original misc info about this GPE */ }; union acpi_gpe_dispatch_info { diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 7a6a3e6f4be..f226eac314d 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -137,6 +137,79 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) /******************************************************************************* * + * FUNCTION: acpi_raw_enable_gpe + * + * PARAMETERS: gpe_event_info - GPE to enable + * + * RETURN: Status + * + * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is + * hardware-enabled. + * + ******************************************************************************/ + +acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ + acpi_status status = AE_OK; + + if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { + return_ACPI_STATUS(AE_LIMIT); + } + + gpe_event_info->runtime_count++; + if (gpe_event_info->runtime_count == 1) { + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); + if (ACPI_SUCCESS(status)) { + status = acpi_ev_enable_gpe(gpe_event_info); + } + + if (ACPI_FAILURE(status)) { + gpe_event_info->runtime_count--; + } + } + + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_raw_disable_gpe + * + * PARAMETERS: gpe_event_info - GPE to disable + * + * RETURN: Status + * + * DESCRIPTION: Remove a reference to a GPE. When the last reference is + * removed, the GPE is hardware-disabled. 
+ * + ******************************************************************************/ + +acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ + acpi_status status = AE_OK; + + if (!gpe_event_info->runtime_count) { + return_ACPI_STATUS(AE_LIMIT); + } + + gpe_event_info->runtime_count--; + if (!gpe_event_info->runtime_count) { + status = acpi_ev_update_gpe_enable_mask(gpe_event_info); + if (ACPI_SUCCESS(status)) { + status = acpi_hw_low_set_gpe(gpe_event_info, + ACPI_GPE_DISABLE); + } + + if (ACPI_FAILURE(status)) { + gpe_event_info->runtime_count++; + } + } + + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * * FUNCTION: acpi_ev_low_get_gpe_info * * PARAMETERS: gpe_number - Raw GPE number diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 4a531cdf794..14e48add32f 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -691,12 +691,22 @@ acpi_install_gpe_handler(acpi_handle gpe_device, return_ACPI_STATUS(status); } + /* Allocate memory for the handler object */ + + handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); + if (!handler) { + status = AE_NO_MEMORY; + goto unlock_and_exit; + } + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); if (!gpe_event_info) { status = AE_BAD_PARAMETER; - goto unlock_and_exit; + goto free_and_exit; } /* Make sure that there isn't a handler there already */ @@ -704,24 +714,30 @@ acpi_install_gpe_handler(acpi_handle gpe_device, if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) { status = AE_ALREADY_EXISTS; - goto unlock_and_exit; + goto free_and_exit; } /* Allocate and init handler object */ - handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); - if (!handler) { - status = AE_NO_MEMORY; - goto unlock_and_exit; - } - handler->address = address; handler->context = context; handler->method_node = gpe_event_info->dispatch.method_node; + handler->orig_flags = gpe_event_info->flags & + (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); + + /* + * If the GPE is associated with a method and it cannot wake up the + * system from sleep states, it was enabled automatically during + * initialization, so it has to be disabled now to avoid spurious + * execution of the handler. + */ + + if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) + && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) + (void)acpi_raw_disable_gpe(gpe_event_info); /* Install the handler */ - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); gpe_event_info->dispatch.handler = handler; /* Setup up dispatch flags to indicate handler (vs. 
method) */ @@ -735,6 +751,11 @@ acpi_install_gpe_handler(acpi_handle gpe_device, unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); + +free_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + ACPI_FREE(handler); + goto unlock_and_exit; } ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) @@ -770,11 +791,17 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, return_ACPI_STATUS(AE_BAD_PARAMETER); } + /* Make sure all deferred tasks are completed */ + + acpi_os_wait_events_complete(NULL); + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); @@ -798,34 +825,34 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, goto unlock_and_exit; } - /* Make sure all deferred tasks are completed */ - - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); - acpi_os_wait_events_complete(NULL); - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - /* Remove the handler */ - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); handler = gpe_event_info->dispatch.handler; /* Restore Method node (if any), set dispatch flags */ gpe_event_info->dispatch.method_node = handler->method_node; - gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */ - if (handler->method_node) { - gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD; - } - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + gpe_event_info->flags &= + ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); + gpe_event_info->flags |= handler->orig_flags; + + /* + * If the GPE was previously associated with a method and it cannot wake + * up the system from sleep states, it should be enabled at this point + * to restore the post-initialization configuration. 
+ */ + + if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) + && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) + (void)acpi_raw_enable_gpe(gpe_event_info); /* Now we can free the handler object */ ACPI_FREE(handler); - unlock_and_exit: +unlock_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 0ec900da579..304825528d4 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -294,7 +294,7 @@ ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup) ******************************************************************************/ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) { - acpi_status status = AE_OK; + acpi_status status = AE_BAD_PARAMETER; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; @@ -305,28 +305,10 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (!gpe_event_info) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; + if (gpe_event_info) { + status = acpi_raw_enable_gpe(gpe_event_info); } - if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { - status = AE_LIMIT; /* Too many references */ - goto unlock_and_exit; - } - - gpe_event_info->runtime_count++; - if (gpe_event_info->runtime_count == 1) { - status = acpi_ev_update_gpe_enable_mask(gpe_event_info); - if (ACPI_SUCCESS(status)) { - status = acpi_ev_enable_gpe(gpe_event_info); - } - if (ACPI_FAILURE(status)) { - gpe_event_info->runtime_count--; - } - } - -unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } @@ -348,7 +330,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe) ******************************************************************************/ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) { - acpi_status status = AE_OK; + acpi_status status = AE_BAD_PARAMETER; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; @@ -359,32 +341,10 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (!gpe_event_info) { - status = AE_BAD_PARAMETER; - goto unlock_and_exit; - } - - /* Hardware-disable a runtime GPE on removal of the last reference */ - - if (!gpe_event_info->runtime_count) { - status = AE_LIMIT; /* There are no references to remove */ - goto unlock_and_exit; + if (gpe_event_info) { + status = acpi_raw_disable_gpe(gpe_event_info) ; } - gpe_event_info->runtime_count--; - if (!gpe_event_info->runtime_count) { - status = acpi_ev_update_gpe_enable_mask(gpe_event_info); - if (ACPI_SUCCESS(status)) { - status = - acpi_hw_low_set_gpe(gpe_event_info, - ACPI_GPE_DISABLE); - } - if (ACPI_FAILURE(status)) { - gpe_event_info->runtime_count++; - } - } - -unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } @@ -411,7 +371,6 @@ acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; acpi_cpu_flags flags; - u8 disable = 0; ACPI_FUNCTION_TRACE(acpi_gpe_can_wake); @@ -430,15 +389,12 @@ acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) } gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; - disable = (gpe_event_info->flags & 
ACPI_GPE_DISPATCH_METHOD) - && gpe_event_info->runtime_count; + if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) { + (void)acpi_raw_disable_gpe(gpe_event_info); + } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); - - if (disable) - status = acpi_disable_gpe(gpe_device, gpe_number); - return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake) diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 058b3df4827..f5cca3a1300 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -279,13 +279,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) { - acpi_thread_id this_thread_id; - ACPI_FUNCTION_NAME(ut_release_mutex); - this_thread_id = acpi_os_get_thread_id(); ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n", - ACPI_CAST_PTR(void, this_thread_id), + ACPI_CAST_PTR(void, acpi_os_get_thread_id()), acpi_ut_get_mutex_name(mutex_id))); if (mutex_id > ACPI_MAX_MUTEX) { diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index f8c668f27b5..907e350f1c7 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -28,3 +28,12 @@ config ACPI_APEI_EINJ EINJ provides a hardware error injection mechanism, it is mainly used for debugging and testing the other parts of APEI and some other RAS features. + +config ACPI_APEI_ERST_DEBUG + tristate "APEI Error Record Serialization Table (ERST) Debug Support" + depends on ACPI_APEI + help + ERST is a way provided by APEI to save and retrieve hardware + error infomation to and from a persistent store. Enable this + if you want to debugging and testing the ERST kernel support + and firmware implementation. diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile index b13b03a1778..d1d1bc0a4ee 100644 --- a/drivers/acpi/apei/Makefile +++ b/drivers/acpi/apei/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o +obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o apei-y := apei-base.o hest.o cper.o erst.o diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 216e1e948ff..73fd0c7487c 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -482,14 +482,14 @@ err_unmap_ioport: list_for_each_entry(res, &resources->ioport, list) { if (res == res_bak) break; - release_mem_region(res->start, res->end - res->start); + release_region(res->start, res->end - res->start); } res_bak = NULL; err_unmap_iomem: list_for_each_entry(res, &resources->iomem, list) { if (res == res_bak) break; - release_region(res->start, res->end - res->start); + release_mem_region(res->start, res->end - res->start); } return -EINVAL; } diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c new file mode 100644 index 00000000000..5281ddda277 --- /dev/null +++ b/drivers/acpi/apei/erst-dbg.c @@ -0,0 +1,207 @@ +/* + * APEI Error Record Serialization Table debug support + * + * ERST is a way provided by APEI to save and retrieve hardware error + * infomation to and from a persistent store. This file provide the + * debugging/testing support for ERST kernel support and firmware + * implementation. + * + * Copyright 2010 Intel Corp. 
+ * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <acpi/apei.h> +#include <linux/miscdevice.h> + +#include "apei-internal.h" + +#define ERST_DBG_PFX "ERST DBG: " + +#define ERST_DBG_RECORD_LEN_MAX 4096 + +static void *erst_dbg_buf; +static unsigned int erst_dbg_buf_len; + +/* Prevent erst_dbg_read/write from being invoked concurrently */ +static DEFINE_MUTEX(erst_dbg_mutex); + +static int erst_dbg_open(struct inode *inode, struct file *file) +{ + if (erst_disable) + return -ENODEV; + + return nonseekable_open(inode, file); +} + +static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + int rc; + u64 record_id; + u32 record_count; + + switch (cmd) { + case APEI_ERST_CLEAR_RECORD: + rc = copy_from_user(&record_id, (void __user *)arg, + sizeof(record_id)); + if (rc) + return -EFAULT; + return erst_clear(record_id); + case APEI_ERST_GET_RECORD_COUNT: + rc = erst_get_record_count(); + if (rc < 0) + return rc; + record_count = rc; + rc = put_user(record_count, (u32 __user *)arg); + if (rc) + return rc; + return 0; + default: + return -ENOTTY; + } +} + +static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf, + size_t usize, loff_t *off) +{ + int rc; + ssize_t len = 0; + u64 id; + + if (*off != 0) + return -EINVAL; + + if (mutex_lock_interruptible(&erst_dbg_mutex) != 0) + return -EINTR; + +retry_next: + rc = erst_get_next_record_id(&id); + if (rc) + goto out; + /* no more record */ + if (id == APEI_ERST_INVALID_RECORD_ID) + goto out; +retry: + rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len); + /* The record may be cleared by others, try read next record */ + if (rc == -ENOENT) + goto retry_next; + if (rc < 0) + goto out; + if (len > ERST_DBG_RECORD_LEN_MAX) { + pr_warning(ERST_DBG_PFX + "Record (ID: 0x%llx) length is too long: %zd\n", + id, len); + rc = -EIO; + goto out; + } + if (len > erst_dbg_buf_len) { + kfree(erst_dbg_buf); + rc = -ENOMEM; + erst_dbg_buf = kmalloc(len, GFP_KERNEL); + if (!erst_dbg_buf) + goto out; + erst_dbg_buf_len = len; + goto retry; + } + + rc = -EINVAL; + if (len > usize) + goto out; + + rc = -EFAULT; + if (copy_to_user(ubuf, erst_dbg_buf, len)) + goto out; + rc = 0; +out: + mutex_unlock(&erst_dbg_mutex); + return rc ? 
rc : len; +} + +static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, + size_t usize, loff_t *off) +{ + int rc; + struct cper_record_header *rcd; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (usize > ERST_DBG_RECORD_LEN_MAX) { + pr_err(ERST_DBG_PFX "Too long record to be written\n"); + return -EINVAL; + } + + if (mutex_lock_interruptible(&erst_dbg_mutex)) + return -EINTR; + if (usize > erst_dbg_buf_len) { + kfree(erst_dbg_buf); + rc = -ENOMEM; + erst_dbg_buf = kmalloc(usize, GFP_KERNEL); + if (!erst_dbg_buf) + goto out; + erst_dbg_buf_len = usize; + } + rc = copy_from_user(erst_dbg_buf, ubuf, usize); + if (rc) { + rc = -EFAULT; + goto out; + } + rcd = erst_dbg_buf; + rc = -EINVAL; + if (rcd->record_length != usize) + goto out; + + rc = erst_write(erst_dbg_buf); + +out: + mutex_unlock(&erst_dbg_mutex); + return rc < 0 ? rc : usize; +} + +static const struct file_operations erst_dbg_ops = { + .owner = THIS_MODULE, + .open = erst_dbg_open, + .read = erst_dbg_read, + .write = erst_dbg_write, + .unlocked_ioctl = erst_dbg_ioctl, +}; + +static struct miscdevice erst_dbg_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "erst_dbg", + .fops = &erst_dbg_ops, +}; + +static __init int erst_dbg_init(void) +{ + return misc_register(&erst_dbg_dev); +} + +static __exit void erst_dbg_exit(void) +{ + misc_deregister(&erst_dbg_dev); + kfree(erst_dbg_buf); +} + +module_init(erst_dbg_init); +module_exit(erst_dbg_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Error Record Serialization Table debug support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index fd0cc016a09..385a6059714 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -41,6 +41,8 @@ #include <linux/interrupt.h> #include <linux/cper.h> #include <linux/kdebug.h> +#include <linux/platform_device.h> +#include <linux/mutex.h> #include <acpi/apei.h> #include <acpi/atomicio.h> #include <acpi/hed.h> @@ -87,6 +89,7 @@ struct ghes { * used for that. 
*/ static LIST_HEAD(ghes_sci); +static DEFINE_MUTEX(ghes_list_mutex); static struct ghes *ghes_new(struct acpi_hest_generic *generic) { @@ -132,26 +135,26 @@ static void ghes_fini(struct ghes *ghes) } enum { - GHES_SER_NO = 0x0, - GHES_SER_CORRECTED = 0x1, - GHES_SER_RECOVERABLE = 0x2, - GHES_SER_PANIC = 0x3, + GHES_SEV_NO = 0x0, + GHES_SEV_CORRECTED = 0x1, + GHES_SEV_RECOVERABLE = 0x2, + GHES_SEV_PANIC = 0x3, }; static inline int ghes_severity(int severity) { switch (severity) { - case CPER_SER_INFORMATIONAL: - return GHES_SER_NO; - case CPER_SER_CORRECTED: - return GHES_SER_CORRECTED; - case CPER_SER_RECOVERABLE: - return GHES_SER_RECOVERABLE; - case CPER_SER_FATAL: - return GHES_SER_PANIC; + case CPER_SEV_INFORMATIONAL: + return GHES_SEV_NO; + case CPER_SEV_CORRECTED: + return GHES_SEV_CORRECTED; + case CPER_SEV_RECOVERABLE: + return GHES_SEV_RECOVERABLE; + case CPER_SEV_FATAL: + return GHES_SEV_PANIC; default: /* Unkown, go panic */ - return GHES_SER_PANIC; + return GHES_SEV_PANIC; } } @@ -237,16 +240,16 @@ static void ghes_clear_estatus(struct ghes *ghes) static void ghes_do_proc(struct ghes *ghes) { - int ser, processed = 0; + int sev, processed = 0; struct acpi_hest_generic_data *gdata; - ser = ghes_severity(ghes->estatus->error_severity); + sev = ghes_severity(ghes->estatus->error_severity); apei_estatus_for_each_section(ghes->estatus, gdata) { #ifdef CONFIG_X86_MCE if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, CPER_SEC_PLATFORM_MEM)) { apei_mce_report_mem_error( - ser == GHES_SER_CORRECTED, + sev == GHES_SEV_CORRECTED, (struct cper_sec_mem_err *)(gdata+1)); processed = 1; } @@ -293,18 +296,15 @@ static struct notifier_block ghes_notifier_sci = { .notifier_call = ghes_notify_sci, }; -static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data) +static int __devinit ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; - int rc = 0; + int rc = -EINVAL; - if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) - return 0; - - generic = (struct acpi_hest_generic *)hest_hdr; + generic = ghes_dev->dev.platform_data; if (!generic->enabled) - return 0; + return -ENODEV; if (generic->error_block_length < sizeof(struct acpi_hest_generic_status)) { @@ -327,62 +327,91 @@ static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data) ghes = NULL; goto err; } - switch (generic->notify.type) { - case ACPI_HEST_NOTIFY_POLLED: - pr_warning(GHES_PFX -"Generic hardware error source: %d notified via POLL is not supported!\n", - generic->header.source_id); - break; - case ACPI_HEST_NOTIFY_EXTERNAL: - case ACPI_HEST_NOTIFY_LOCAL: - pr_warning(GHES_PFX -"Generic hardware error source: %d notified via IRQ is not supported!\n", - generic->header.source_id); - break; - case ACPI_HEST_NOTIFY_SCI: + if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) { + mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_sci)) register_acpi_hed_notifier(&ghes_notifier_sci); list_add_rcu(&ghes->list, &ghes_sci); - break; - case ACPI_HEST_NOTIFY_NMI: - pr_warning(GHES_PFX -"Generic hardware error source: %d notified via NMI is not supported!\n", - generic->header.source_id); - break; - default: - pr_warning(FW_WARN GHES_PFX - "Unknown notification type: %u for generic hardware error source: %d\n", - generic->notify.type, generic->header.source_id); - break; + mutex_unlock(&ghes_list_mutex); + } else { + unsigned char *notify = NULL; + + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + notify = "POLL"; + break; + case 
ACPI_HEST_NOTIFY_EXTERNAL: + case ACPI_HEST_NOTIFY_LOCAL: + notify = "IRQ"; + break; + case ACPI_HEST_NOTIFY_NMI: + notify = "NMI"; + break; + } + if (notify) { + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via %s is not supported!\n", + generic->header.source_id, notify); + } else { + pr_warning(FW_WARN GHES_PFX +"Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); + } + rc = -ENODEV; + goto err; } + platform_set_drvdata(ghes_dev, ghes); return 0; err: - if (ghes) + if (ghes) { ghes_fini(ghes); + kfree(ghes); + } return rc; } -static void ghes_cleanup(void) +static int __devexit ghes_remove(struct platform_device *ghes_dev) { - struct ghes *ghes, *nghes; + struct ghes *ghes; + struct acpi_hest_generic *generic; - if (!list_empty(&ghes_sci)) - unregister_acpi_hed_notifier(&ghes_notifier_sci); + ghes = platform_get_drvdata(ghes_dev); + generic = ghes->generic; + + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_SCI: + mutex_lock(&ghes_list_mutex); + list_del_rcu(&ghes->list); + if (list_empty(&ghes_sci)) + unregister_acpi_hed_notifier(&ghes_notifier_sci); + mutex_unlock(&ghes_list_mutex); + break; + default: + BUG(); + break; + } synchronize_rcu(); + ghes_fini(ghes); + kfree(ghes); - list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) { - list_del(&ghes->list); - ghes_fini(ghes); - kfree(ghes); - } + platform_set_drvdata(ghes_dev, NULL); + + return 0; } +static struct platform_driver ghes_platform_driver = { + .driver = { + .name = "GHES", + .owner = THIS_MODULE, + }, + .probe = ghes_probe, + .remove = ghes_remove, +}; + static int __init ghes_init(void) { - int rc; - if (acpi_disabled) return -ENODEV; @@ -391,32 +420,12 @@ static int __init ghes_init(void) return -EINVAL; } - rc = apei_hest_parse(hest_ghes_parse, NULL); - if (rc) { - pr_err(GHES_PFX - "Error during parsing HEST generic hardware error sources.\n"); - goto err_cleanup; - } - - if (list_empty(&ghes_sci)) { - pr_info(GHES_PFX - "No functional generic hardware error sources.\n"); - rc = -ENODEV; - goto err_cleanup; - } - - pr_info(GHES_PFX - "Generic Hardware Error Source support is initialized.\n"); - - return 0; -err_cleanup: - ghes_cleanup(); - return rc; + return platform_driver_register(&ghes_platform_driver); } static void __exit ghes_exit(void) { - ghes_cleanup(); + platform_driver_unregister(&ghes_platform_driver); } module_init(ghes_init); @@ -425,3 +434,4 @@ module_exit(ghes_exit); MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("APEI Generic Hardware Error Source support"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:GHES"); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index e7f40d362cb..343168d1826 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -34,6 +34,7 @@ #include <linux/kdebug.h> #include <linux/highmem.h> #include <linux/io.h> +#include <linux/platform_device.h> #include <acpi/apei.h> #include "apei-internal.h" @@ -47,11 +48,6 @@ EXPORT_SYMBOL_GPL(hest_disable); static struct acpi_table_hest *hest_tab; -static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data) -{ - return 0; -} - static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, @@ -125,6 +121,69 @@ int apei_hest_parse(apei_hest_func_t func, void *data) } EXPORT_SYMBOL_GPL(apei_hest_parse); +struct ghes_arr { + struct platform_device **ghes_devs; + unsigned int count; +}; + 
+static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) +{ + int *count = data; + + if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR) + (*count)++; + return 0; +} + +static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) +{ + struct acpi_hest_generic *generic; + struct platform_device *ghes_dev; + struct ghes_arr *ghes_arr = data; + int rc; + + if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) + return 0; + generic = (struct acpi_hest_generic *)hest_hdr; + if (!generic->enabled) + return 0; + ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); + if (!ghes_dev) + return -ENOMEM; + ghes_dev->dev.platform_data = generic; + rc = platform_device_add(ghes_dev); + if (rc) + goto err; + ghes_arr->ghes_devs[ghes_arr->count++] = ghes_dev; + + return 0; +err: + platform_device_put(ghes_dev); + return rc; +} + +static int hest_ghes_dev_register(unsigned int ghes_count) +{ + int rc, i; + struct ghes_arr ghes_arr; + + ghes_arr.count = 0; + ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL); + if (!ghes_arr.ghes_devs) + return -ENOMEM; + + rc = apei_hest_parse(hest_parse_ghes, &ghes_arr); + if (rc) + goto err; +out: + kfree(ghes_arr.ghes_devs); + return rc; +err: + for (i = 0; i < ghes_arr.count; i++) + platform_device_unregister(ghes_arr.ghes_devs[i]); + goto out; +} + static int __init setup_hest_disable(char *str) { hest_disable = 1; @@ -137,6 +196,7 @@ static int __init hest_init(void) { acpi_status status; int rc = -ENODEV; + unsigned int ghes_count = 0; if (acpi_disabled) goto err; @@ -158,7 +218,11 @@ static int __init hest_init(void) goto err; } - rc = apei_hest_parse(hest_void_parse, NULL); + rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); + if (rc) + goto err; + + rc = hest_ghes_dev_register(ghes_count); if (rc) goto err; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c1d23cd7165..5c221ab535d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1034,8 +1034,8 @@ static int __init acpi_init(void) acpi_scan_init(); acpi_ec_init(); acpi_power_init(); - acpi_system_init(); - acpi_debug_init(); + acpi_sysfs_init(); + acpi_debugfs_init(); acpi_sleep_proc_init(); acpi_wakeup_device_init(); return result; diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c deleted file mode 100644 index 295dbfa2db9..00000000000 --- a/drivers/acpi/debug.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * debug.c - ACPI debug interface to userspace. 
- */ - -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/moduleparam.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <asm/uaccess.h> -#include <acpi/acpi_drivers.h> - -#define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("debug"); - -struct acpi_dlayer { - const char *name; - unsigned long value; -}; -struct acpi_dlevel { - const char *name; - unsigned long value; -}; -#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v } - -static const struct acpi_dlayer acpi_debug_layers[] = { - ACPI_DEBUG_INIT(ACPI_UTILITIES), - ACPI_DEBUG_INIT(ACPI_HARDWARE), - ACPI_DEBUG_INIT(ACPI_EVENTS), - ACPI_DEBUG_INIT(ACPI_TABLES), - ACPI_DEBUG_INIT(ACPI_NAMESPACE), - ACPI_DEBUG_INIT(ACPI_PARSER), - ACPI_DEBUG_INIT(ACPI_DISPATCHER), - ACPI_DEBUG_INIT(ACPI_EXECUTER), - ACPI_DEBUG_INIT(ACPI_RESOURCES), - ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER), - ACPI_DEBUG_INIT(ACPI_OS_SERVICES), - ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER), - ACPI_DEBUG_INIT(ACPI_COMPILER), - ACPI_DEBUG_INIT(ACPI_TOOLS), - - ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT), - ACPI_DEBUG_INIT(ACPI_AC_COMPONENT), - ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT), - ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT), - ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT), - ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT), - ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT), - ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT), - ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT), - ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT), - ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT), - ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT), - ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT), - ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT), -}; - -static const struct acpi_dlevel acpi_debug_levels[] = { - ACPI_DEBUG_INIT(ACPI_LV_INIT), - ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT), - ACPI_DEBUG_INIT(ACPI_LV_INFO), - - ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES), - ACPI_DEBUG_INIT(ACPI_LV_PARSE), - ACPI_DEBUG_INIT(ACPI_LV_LOAD), - ACPI_DEBUG_INIT(ACPI_LV_DISPATCH), - ACPI_DEBUG_INIT(ACPI_LV_EXEC), - ACPI_DEBUG_INIT(ACPI_LV_NAMES), - ACPI_DEBUG_INIT(ACPI_LV_OPREGION), - ACPI_DEBUG_INIT(ACPI_LV_BFIELD), - ACPI_DEBUG_INIT(ACPI_LV_TABLES), - ACPI_DEBUG_INIT(ACPI_LV_VALUES), - ACPI_DEBUG_INIT(ACPI_LV_OBJECTS), - ACPI_DEBUG_INIT(ACPI_LV_RESOURCES), - ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS), - ACPI_DEBUG_INIT(ACPI_LV_PACKAGE), - - ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS), - ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS), - ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS), - - ACPI_DEBUG_INIT(ACPI_LV_MUTEX), - ACPI_DEBUG_INIT(ACPI_LV_THREADS), - ACPI_DEBUG_INIT(ACPI_LV_IO), - ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS), - - ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE), - ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO), - ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES), - ACPI_DEBUG_INIT(ACPI_LV_EVENTS), -}; - -/* -------------------------------------------------------------------------- - FS Interface (/sys) - -------------------------------------------------------------------------- */ -static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) -{ - int result = 0; - int i; - - result = sprintf(buffer, "%-25s\tHex SET\n", "Description"); - - for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) { - result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n", - acpi_debug_layers[i].name, - acpi_debug_layers[i].value, - (acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' '); - } - result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS", - ACPI_ALL_DRIVERS, - (acpi_dbg_layer & ACPI_ALL_DRIVERS) == - ACPI_ALL_DRIVERS ? 
'*' : (acpi_dbg_layer & - ACPI_ALL_DRIVERS) == 0 ? ' ' : '-'); - result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer); - - return result; -} - -static int param_get_debug_level(char *buffer, const struct kernel_param *kp) -{ - int result = 0; - int i; - - result = sprintf(buffer, "%-25s\tHex SET\n", "Description"); - - for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) { - result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n", - acpi_debug_levels[i].name, - acpi_debug_levels[i].value, - (acpi_dbg_level & acpi_debug_levels[i]. - value) ? '*' : ' '); - } - result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n", - acpi_dbg_level); - - return result; -} - -static struct kernel_param_ops acpi_debug_layer_ops = { - .set = param_set_uint, - .get = param_get_debug_layer, -}; - -static struct kernel_param_ops acpi_debug_level_ops = { - .set = param_set_uint, - .get = param_get_debug_level, -}; - -module_param_cb(debug_layer, &acpi_debug_layer_ops, &acpi_dbg_layer, 0644); -module_param_cb(debug_level, &acpi_debug_level_ops, &acpi_dbg_level, 0644); - -static char trace_method_name[6]; -module_param_string(trace_method_name, trace_method_name, 6, 0644); -static unsigned int trace_debug_layer; -module_param(trace_debug_layer, uint, 0644); -static unsigned int trace_debug_level; -module_param(trace_debug_level, uint, 0644); - -static int param_set_trace_state(const char *val, const struct kernel_param *kp) -{ - int result = 0; - - if (!strncmp(val, "enable", strlen("enable") - 1)) { - result = acpi_debug_trace(trace_method_name, trace_debug_level, - trace_debug_layer, 0); - if (result) - result = -EBUSY; - goto exit; - } - - if (!strncmp(val, "disable", strlen("disable") - 1)) { - int name = 0; - result = acpi_debug_trace((char *)&name, trace_debug_level, - trace_debug_layer, 0); - if (result) - result = -EBUSY; - goto exit; - } - - if (!strncmp(val, "1", 1)) { - result = acpi_debug_trace(trace_method_name, trace_debug_level, - trace_debug_layer, 1); - if (result) - result = -EBUSY; - goto exit; - } - - result = -EINVAL; -exit: - return result; -} - -static int param_get_trace_state(char *buffer, const struct kernel_param *kp) -{ - if (!acpi_gbl_trace_method_name) - return sprintf(buffer, "disable"); - else { - if (acpi_gbl_trace_flags & 1) - return sprintf(buffer, "1"); - else - return sprintf(buffer, "enable"); - } - return 0; -} - -static struct kernel_param_ops param_ops_trace_state = { - .set = param_set_trace_state, - .get = param_get_trace_state, -}; - -module_param_cb(trace_state, ¶m_ops_trace_state, NULL, 0644); - -/* -------------------------------------------------------------------------- - DebugFS Interface - -------------------------------------------------------------------------- */ - -static ssize_t cm_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - static char *buf; - static int uncopied_bytes; - struct acpi_table_header table; - acpi_status status; - - if (!(*ppos)) { - /* parse the table header to get the table length */ - if (count <= sizeof(struct acpi_table_header)) - return -EINVAL; - if (copy_from_user(&table, user_buf, - sizeof(struct acpi_table_header))) - return -EFAULT; - uncopied_bytes = table.length; - buf = kzalloc(uncopied_bytes, GFP_KERNEL); - if (!buf) - return -ENOMEM; - } - - if (uncopied_bytes < count) { - kfree(buf); - return -EINVAL; - } - - if (copy_from_user(buf + (*ppos), user_buf, count)) { - kfree(buf); - return -EFAULT; - } - - uncopied_bytes -= count; 
- *ppos += count; - - if (!uncopied_bytes) { - status = acpi_install_method(buf); - kfree(buf); - if (ACPI_FAILURE(status)) - return -EINVAL; - add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); - } - - return count; -} - -static const struct file_operations cm_fops = { - .write = cm_write, -}; - -static int acpi_debugfs_init(void) -{ - struct dentry *acpi_dir, *cm_dentry; - - acpi_dir = debugfs_create_dir("acpi", NULL); - if (!acpi_dir) - goto err; - - cm_dentry = debugfs_create_file("custom_method", S_IWUGO, - acpi_dir, NULL, &cm_fops); - if (!cm_dentry) - goto err; - - return 0; - -err: - if (acpi_dir) - debugfs_remove(acpi_dir); - return -EINVAL; -} - -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ -#ifdef CONFIG_ACPI_PROCFS -#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" -#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level" - -static int acpi_system_debug_proc_show(struct seq_file *m, void *v) -{ - unsigned int i; - - seq_printf(m, "%-25s\tHex SET\n", "Description"); - - switch ((unsigned long)m->private) { - case 0: - for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) { - seq_printf(m, "%-25s\t0x%08lX [%c]\n", - acpi_debug_layers[i].name, - acpi_debug_layers[i].value, - (acpi_dbg_layer & acpi_debug_layers[i]. - value) ? '*' : ' '); - } - seq_printf(m, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS", - ACPI_ALL_DRIVERS, - (acpi_dbg_layer & ACPI_ALL_DRIVERS) == - ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & - ACPI_ALL_DRIVERS) == - 0 ? ' ' : '-'); - seq_printf(m, - "--\ndebug_layer = 0x%08X (* = enabled, - = partial)\n", - acpi_dbg_layer); - break; - case 1: - for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) { - seq_printf(m, "%-25s\t0x%08lX [%c]\n", - acpi_debug_levels[i].name, - acpi_debug_levels[i].value, - (acpi_dbg_level & acpi_debug_levels[i]. - value) ? 
'*' : ' '); - } - seq_printf(m, "--\ndebug_level = 0x%08X (* = enabled)\n", - acpi_dbg_level); - break; - } - return 0; -} - -static int acpi_system_debug_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_system_debug_proc_show, PDE(inode)->data); -} - -static ssize_t acpi_system_debug_proc_write(struct file *file, - const char __user * buffer, - size_t count, loff_t *pos) -{ - char debug_string[12] = { '\0' }; - - - if (count > sizeof(debug_string) - 1) - return -EINVAL; - - if (copy_from_user(debug_string, buffer, count)) - return -EFAULT; - - debug_string[count] = '\0'; - - switch ((unsigned long)PDE(file->f_path.dentry->d_inode)->data) { - case 0: - acpi_dbg_layer = simple_strtoul(debug_string, NULL, 0); - break; - case 1: - acpi_dbg_level = simple_strtoul(debug_string, NULL, 0); - break; - default: - return -EINVAL; - } - - return count; -} - -static const struct file_operations acpi_system_debug_proc_fops = { - .owner = THIS_MODULE, - .open = acpi_system_debug_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = acpi_system_debug_proc_write, -}; -#endif - -int __init acpi_procfs_init(void) -{ -#ifdef CONFIG_ACPI_PROCFS - struct proc_dir_entry *entry; - int error = 0; - char *name; - - /* 'debug_layer' [R/W] */ - name = ACPI_SYSTEM_FILE_DEBUG_LAYER; - entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, - acpi_root_dir, &acpi_system_debug_proc_fops, - (void *)0); - if (!entry) - goto Error; - - /* 'debug_level' [R/W] */ - name = ACPI_SYSTEM_FILE_DEBUG_LEVEL; - entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, - acpi_root_dir, &acpi_system_debug_proc_fops, - (void *)1); - if (!entry) - goto Error; - - Done: - return error; - - Error: - remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LEVEL, acpi_root_dir); - remove_proc_entry(ACPI_SYSTEM_FILE_DEBUG_LAYER, acpi_root_dir); - error = -ENODEV; - goto Done; -#else - return 0; -#endif -} - -int __init acpi_debug_init(void) -{ - acpi_debugfs_init(); - acpi_procfs_init(); - return 0; -} diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c new file mode 100644 index 00000000000..7de27d49c4b --- /dev/null +++ b/drivers/acpi/debugfs.c @@ -0,0 +1,93 @@ +/* + * debugfs.c - ACPI debugfs interface to userspace. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <acpi/acpi_drivers.h> + +#define _COMPONENT ACPI_SYSTEM_COMPONENT +ACPI_MODULE_NAME("debugfs"); + + +/* /sys/modules/acpi/parameters/aml_debug_output */ + +module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object, + bool, 0644); +MODULE_PARM_DESC(aml_debug_output, + "To enable/disable the ACPI Debug Object output."); + +/* /sys/kernel/debug/acpi/custom_method */ + +static ssize_t cm_write(struct file *file, const char __user * user_buf, + size_t count, loff_t *ppos) +{ + static char *buf; + static int uncopied_bytes; + struct acpi_table_header table; + acpi_status status; + + if (!(*ppos)) { + /* parse the table header to get the table length */ + if (count <= sizeof(struct acpi_table_header)) + return -EINVAL; + if (copy_from_user(&table, user_buf, + sizeof(struct acpi_table_header))) + return -EFAULT; + uncopied_bytes = table.length; + buf = kzalloc(uncopied_bytes, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + if (uncopied_bytes < count) { + kfree(buf); + return -EINVAL; + } + + if (copy_from_user(buf + (*ppos), user_buf, count)) { + kfree(buf); + return -EFAULT; + } + + uncopied_bytes -= count; + *ppos += count; + + if (!uncopied_bytes) { + status = acpi_install_method(buf); + kfree(buf); + if (ACPI_FAILURE(status)) + return -EINVAL; + add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); + } + + return count; +} + +static const struct file_operations cm_fops = { + .write = cm_write, +}; + +int __init acpi_debugfs_init(void) +{ + struct dentry *acpi_dir, *cm_dentry; + + acpi_dir = debugfs_create_dir("acpi", NULL); + if (!acpi_dir) + goto err; + + cm_dentry = debugfs_create_file("custom_method", S_IWUGO, + acpi_dir, NULL, &cm_fops); + if (!cm_dentry) + goto err; + + return 0; + +err: + if (acpi_dir) + debugfs_remove(acpi_dir); + return -EINVAL; +} diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 4af6301601e..78b0164c35b 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -100,7 +100,8 @@ do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv) status = acpi_get_object_info(handle, &info); if (ACPI_SUCCESS(status)) { - if (info->address == find->address) + if ((info->address == find->address) + && (info->valid & ACPI_VALID_ADR)) find->handle = handle; kfree(info); } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 8ae27264a00..a212bfeddf8 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -27,12 +27,12 @@ int init_acpi_device_notify(void); int acpi_scan_init(void); -int acpi_system_init(void); +int acpi_sysfs_init(void); -#ifdef CONFIG_ACPI_DEBUG -int acpi_debug_init(void); +#ifdef CONFIG_DEBUG_FS +int acpi_debugfs_init(void); #else -static inline int acpi_debug_init(void) { return 0; } +static inline int acpi_debugfs_init(void) { return 0; } #endif /* -------------------------------------------------------------------------- diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index b0337d31460..5718566e00f 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -255,12 +255,10 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, static int __init acpi_parse_srat(struct acpi_table_header *table) { - struct acpi_table_srat *srat; - if (!table) return -EINVAL; - srat = (struct acpi_table_srat *)table; + /* Real work done in acpi_table_parse_srat below. 
*/ return 0; } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index f14d3f251d2..65b25a303b8 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -141,15 +141,14 @@ static struct osi_linux { static void __init acpi_request_region (struct acpi_generic_address *addr, unsigned int length, char *desc) { - struct resource *res; - if (!addr->address || !length) return; + /* Resources are never freed */ if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO) - res = request_region(addr->address, length, desc); + request_region(addr->address, length, desc); else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - res = request_mem_region(addr->address, length, desc); + request_mem_region(addr->address, length, desc); } static int __init acpi_reserve_resources(void) diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index f74d3b31e5c..844c155aeb0 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -40,8 +40,6 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include "sleep.h" @@ -64,7 +62,6 @@ module_param_named(power_nocheck, acpi_power_nocheck, bool, 000); static int acpi_power_add(struct acpi_device *device); static int acpi_power_remove(struct acpi_device *device, int type); static int acpi_power_resume(struct acpi_device *device); -static int acpi_power_open_fs(struct inode *inode, struct file *file); static const struct acpi_device_id power_device_ids[] = { {ACPI_POWER_HID, 0}, @@ -99,14 +96,6 @@ struct acpi_power_resource { static struct list_head acpi_power_resource_list; -static const struct file_operations acpi_power_fops = { - .owner = THIS_MODULE, - .open = acpi_power_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - /* -------------------------------------------------------------------------- Power Resource Management -------------------------------------------------------------------------- */ @@ -255,7 +244,6 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) struct list_head *node, *next; struct acpi_power_reference *ref; - result = acpi_power_get_context(handle, &resource); if (result) return result; @@ -542,102 +530,6 @@ int acpi_power_transition(struct acpi_device *device, int state) } /* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -static struct proc_dir_entry *acpi_power_dir; - -static int acpi_power_seq_show(struct seq_file *seq, void *offset) -{ - int count = 0; - int result = 0, state; - struct acpi_power_resource *resource = NULL; - struct list_head *node, *next; - struct acpi_power_reference *ref; - - - resource = seq->private; - - if (!resource) - goto end; - - result = acpi_power_get_state(resource->device->handle, &state); - if (result) - goto end; - - seq_puts(seq, "state: "); - switch (state) { - case ACPI_POWER_RESOURCE_STATE_ON: - seq_puts(seq, "on\n"); - break; - case ACPI_POWER_RESOURCE_STATE_OFF: - seq_puts(seq, "off\n"); - break; - default: - seq_puts(seq, "unknown\n"); - break; - } - - mutex_lock(&resource->resource_lock); - list_for_each_safe(node, next, &resource->reference) { - ref = container_of(node, struct acpi_power_reference, node); - count++; - } - mutex_unlock(&resource->resource_lock); - - seq_printf(seq, "system level: S%d\n" - "order: %d\n" - "reference count: %d\n", - 
resource->system_level, - resource->order, count); - - end: - return 0; -} - -static int acpi_power_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_power_seq_show, PDE(inode)->data); -} - -static int acpi_power_add_fs(struct acpi_device *device) -{ - struct proc_dir_entry *entry = NULL; - - - if (!device) - return -EINVAL; - - if (!acpi_device_dir(device)) { - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), - acpi_power_dir); - if (!acpi_device_dir(device)) - return -ENODEV; - } - - /* 'status' [R] */ - entry = proc_create_data(ACPI_POWER_FILE_STATUS, - S_IRUGO, acpi_device_dir(device), - &acpi_power_fops, acpi_driver_data(device)); - if (!entry) - return -EIO; - return 0; -} - -static int acpi_power_remove_fs(struct acpi_device *device) -{ - - if (acpi_device_dir(device)) { - remove_proc_entry(ACPI_POWER_FILE_STATUS, - acpi_device_dir(device)); - remove_proc_entry(acpi_device_bid(device), acpi_power_dir); - acpi_device_dir(device) = NULL; - } - - return 0; -} - -/* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -690,10 +582,6 @@ static int acpi_power_add(struct acpi_device *device) break; } - result = acpi_power_add_fs(device); - if (result) - goto end; - printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), state ? "on" : "off"); @@ -715,8 +603,6 @@ static int acpi_power_remove(struct acpi_device *device, int type) resource = acpi_driver_data(device); - acpi_power_remove_fs(device); - mutex_lock(&resource->resource_lock); list_for_each_safe(node, next, &resource->reference) { struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node); @@ -760,19 +646,6 @@ static int acpi_power_resume(struct acpi_device *device) int __init acpi_power_init(void) { - int result = 0; - INIT_LIST_HEAD(&acpi_power_resource_list); - - acpi_power_dir = proc_mkdir(ACPI_POWER_CLASS, acpi_root_dir); - if (!acpi_power_dir) - return -ENODEV; - - result = acpi_bus_register_driver(&acpi_power_driver); - if (result < 0) { - remove_proc_entry(ACPI_POWER_CLASS, acpi_root_dir); - return -ENODEV; - } - - return 0; + return acpi_bus_register_driver(&acpi_power_driver); } diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 1ac678d2c51..afad67769db 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -17,64 +17,11 @@ /* * this file provides support for: - * /proc/acpi/sleep * /proc/acpi/alarm * /proc/acpi/wakeup */ ACPI_MODULE_NAME("sleep") -#ifdef CONFIG_ACPI_PROCFS -static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset) -{ - int i; - - for (i = 0; i <= ACPI_STATE_S5; i++) { - if (sleep_states[i]) { - seq_printf(seq, "S%d ", i); - } - } - - seq_puts(seq, "\n"); - - return 0; -} - -static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data); -} - -static ssize_t -acpi_system_write_sleep(struct file *file, - const char __user * buffer, size_t count, loff_t * ppos) -{ - char str[12]; - u32 state = 0; - int error = 0; - - if (count > sizeof(str) - 1) - goto Done; - memset(str, 0, sizeof(str)); - if (copy_from_user(str, buffer, count)) - return -EFAULT; - - /* Check for S4 bios request */ - if (!strcmp(str, "4b")) { - error = acpi_suspend(4); - goto Done; - } - state = simple_strtoul(str, NULL, 0); -#ifdef CONFIG_HIBERNATION - if (state == 4) { - error = 
hibernate(); - goto Done; - } -#endif - error = acpi_suspend(state); - Done: - return error ? error : count; -} -#endif /* CONFIG_ACPI_PROCFS */ #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86) /* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */ @@ -463,17 +410,6 @@ static const struct file_operations acpi_system_wakeup_device_fops = { .release = single_release, }; -#ifdef CONFIG_ACPI_PROCFS -static const struct file_operations acpi_system_sleep_fops = { - .owner = THIS_MODULE, - .open = acpi_system_sleep_open_fs, - .read = seq_read, - .write = acpi_system_write_sleep, - .llseek = seq_lseek, - .release = single_release, -}; -#endif /* CONFIG_ACPI_PROCFS */ - #ifdef HAVE_ACPI_LEGACY_ALARM static const struct file_operations acpi_system_alarm_fops = { .owner = THIS_MODULE, @@ -495,12 +431,6 @@ static u32 rtc_handler(void *context) int __init acpi_sleep_proc_init(void) { -#ifdef CONFIG_ACPI_PROCFS - /* 'sleep' [R/W] */ - proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR, - acpi_root_dir, &acpi_system_sleep_fops); -#endif /* CONFIG_ACPI_PROCFS */ - #ifdef HAVE_ACPI_LEGACY_ALARM /* 'alarm' [R/W] */ proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR, diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 38ea0cc6dc4..15602189238 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -83,9 +83,6 @@ MODULE_LICENSE("GPL"); static int acpi_processor_add(struct acpi_device *device); static int acpi_processor_remove(struct acpi_device *device, int type); -#ifdef CONFIG_ACPI_PROCFS -static int acpi_processor_info_open_fs(struct inode *inode, struct file *file); -#endif static void acpi_processor_notify(struct acpi_device *device, u32 event); static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); static int acpi_processor_handle_eject(struct acpi_processor *pr); @@ -113,15 +110,6 @@ static struct acpi_driver acpi_processor_driver = { #define INSTALL_NOTIFY_HANDLER 1 #define UNINSTALL_NOTIFY_HANDLER 2 -#ifdef CONFIG_ACPI_PROCFS -static const struct file_operations acpi_processor_info_fops = { - .owner = THIS_MODULE, - .open = acpi_processor_info_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; -#endif DEFINE_PER_CPU(struct acpi_processor *, processors); EXPORT_PER_CPU_SYMBOL(processors); @@ -256,44 +244,8 @@ static int acpi_processor_errata(struct acpi_processor *pr) return result; } -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -#ifdef CONFIG_ACPI_PROCFS static struct proc_dir_entry *acpi_processor_dir = NULL; -static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = seq->private; - - - if (!pr) - goto end; - - seq_printf(seq, "processor id: %d\n" - "acpi id: %d\n" - "bus mastering control: %s\n" - "power management: %s\n" - "throttling control: %s\n" - "limit interface: %s\n", - pr->id, - pr->acpi_id, - pr->flags.bm_control ? "yes" : "no", - pr->flags.power ? "yes" : "no", - pr->flags.throttling ? "yes" : "no", - pr->flags.limit ? 
"yes" : "no"); - - end: - return 0; -} - -static int acpi_processor_info_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_info_seq_show, - PDE(inode)->data); -} - static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) { struct proc_dir_entry *entry = NULL; @@ -306,14 +258,6 @@ static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) return -ENODEV; } - /* 'info' [R] */ - entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO, - S_IRUGO, acpi_device_dir(device), - &acpi_processor_info_fops, - acpi_driver_data(device)); - if (!entry) - return -EIO; - /* 'throttling' [R/W] */ entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, S_IFREG | S_IRUGO | S_IWUSR, @@ -322,43 +266,20 @@ static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) acpi_driver_data(device)); if (!entry) return -EIO; - - /* 'limit' [R/W] */ - entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT, - S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device), - &acpi_processor_limit_fops, - acpi_driver_data(device)); - if (!entry) - return -EIO; return 0; } static int acpi_processor_remove_fs(struct acpi_device *device) { if (acpi_device_dir(device)) { - remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, - acpi_device_dir(device)); remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, acpi_device_dir(device)); - remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, - acpi_device_dir(device)); remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); acpi_device_dir(device) = NULL; } return 0; } -#else -static inline int acpi_processor_add_fs(struct acpi_device *device) -{ - return 0; -} -static inline int acpi_processor_remove_fs(struct acpi_device *device) -{ - return 0; -} -#endif /* -------------------------------------------------------------------------- Driver Interface @@ -921,11 +842,9 @@ static int __init acpi_processor_init(void) memset(&errata, 0, sizeof(errata)); -#ifdef CONFIG_ACPI_PROCFS acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); if (!acpi_processor_dir) return -ENOMEM; -#endif if (!cpuidle_register_driver(&acpi_idle_driver)) { printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", @@ -952,9 +871,7 @@ static int __init acpi_processor_init(void) out_cpuidle: cpuidle_unregister_driver(&acpi_idle_driver); -#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); -#endif return result; } @@ -974,9 +891,7 @@ static void __exit acpi_processor_exit(void) cpuidle_unregister_driver(&acpi_idle_driver); -#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); -#endif return; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b4c2f3bdade..f4428e82b35 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -33,8 +33,6 @@ #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/moduleparam.h> @@ -82,13 +80,6 @@ module_param(bm_check_disable, uint, 0000); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); -#ifdef CONFIG_ACPI_PROCFS -static u64 us_to_pm_timer_ticks(s64 t) -{ - return div64_u64(t * PM_TIMER_FREQUENCY, 1000000); -} -#endif - /* * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. * For now disable this. Probably a bug somewhere else. 
@@ -689,78 +680,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) return 0; } -#ifdef CONFIG_ACPI_PROCFS -static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = seq->private; - unsigned int i; - - - if (!pr) - goto end; - - seq_printf(seq, "active state: C%zd\n" - "max_cstate: C%d\n" - "maximum allowed latency: %d usec\n", - pr->power.state ? pr->power.state - pr->power.states : 0, - max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY)); - - seq_puts(seq, "states:\n"); - - for (i = 1; i <= pr->power.count; i++) { - seq_printf(seq, " %cC%d: ", - (&pr->power.states[i] == - pr->power.state ? '*' : ' '), i); - - if (!pr->power.states[i].valid) { - seq_puts(seq, "<not supported>\n"); - continue; - } - - switch (pr->power.states[i].type) { - case ACPI_STATE_C1: - seq_printf(seq, "type[C1] "); - break; - case ACPI_STATE_C2: - seq_printf(seq, "type[C2] "); - break; - case ACPI_STATE_C3: - seq_printf(seq, "type[C3] "); - break; - default: - seq_printf(seq, "type[--] "); - break; - } - - seq_puts(seq, "promotion[--] "); - - seq_puts(seq, "demotion[--] "); - - seq_printf(seq, "latency[%03d] usage[%08d] duration[%020Lu]\n", - pr->power.states[i].latency, - pr->power.states[i].usage, - us_to_pm_timer_ticks(pr->power.states[i].time)); - } - - end: - return 0; -} - -static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_power_seq_show, - PDE(inode)->data); -} - -static const struct file_operations acpi_processor_power_fops = { - .owner = THIS_MODULE, - .open = acpi_processor_power_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; -#endif - /** * acpi_idle_bm_check - checks if bus master activity was detected */ @@ -803,13 +722,12 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) } else if (cx->entry_method == ACPI_CSTATE_HALT) { acpi_safe_halt(); } else { - int unused; /* IO port based C-state */ inb(cx->address); /* Dummy wait op - must do something useless after P_LVL2 read because chipsets cannot guarantee that STPCLK# signal gets asserted in time to freeze execution properly. 
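Editor's note: the /proc/acpi/processor/*/power file removed in the hunk above duplicated what cpuidle already exports under /sys/devices/system/cpu/*/cpuidle/. Below is a minimal userspace sketch (not part of the patch) that reads the replacement interface; the per-state file names (name, usage, time) are assumed from the standard cpuidle sysfs layout rather than spelled out here.

#include <stdio.h>
#include <string.h>

static int read_line(const char *fmt, int state, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), fmt, state);
	f = fopen(path, "r");
	if (!f)
		return -1;			/* file not exported */
	if (!fgets(buf, (int)len, f))
		buf[0] = '\0';
	buf[strcspn(buf, "\n")] = '\0';		/* strip trailing newline */
	fclose(f);
	return 0;
}

int main(void)
{
	char name[64], usage[64] = "?", time_us[64] = "?";
	int state;

	for (state = 0; ; state++) {
		if (read_line("/sys/devices/system/cpu/cpu0/cpuidle/state%d/name",
			      state, name, sizeof(name)))
			break;			/* no more C-states exported */
		read_line("/sys/devices/system/cpu/cpu0/cpuidle/state%d/usage",
			  state, usage, sizeof(usage));
		read_line("/sys/devices/system/cpu/cpu0/cpuidle/state%d/time",
			  state, time_us, sizeof(time_us));
		printf("state%d: name=%s usage=%s time_us=%s\n",
		       state, name, usage, time_us);
	}
	return 0;
}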
*/ - unused = inl(acpi_gbl_FADT.xpm_timer_block.address); + inl(acpi_gbl_FADT.xpm_timer_block.address); } start_critical_timings(); } @@ -1172,9 +1090,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, { acpi_status status = 0; static int first_run; -#ifdef CONFIG_ACPI_PROCFS - struct proc_dir_entry *entry = NULL; -#endif if (boot_option_idle_override) return 0; @@ -1223,15 +1138,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, if (cpuidle_register_device(&pr->power.dev)) return -EIO; } -#ifdef CONFIG_ACPI_PROCFS - /* 'power' [R] */ - entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER, - S_IRUGO, acpi_device_dir(device), - &acpi_processor_power_fops, - acpi_driver_data(device)); - if (!entry) - return -EIO; -#endif return 0; } @@ -1244,11 +1150,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr, cpuidle_unregister_device(&pr->power.dev); pr->flags.power_setup_done = 0; -#ifdef CONFIG_ACPI_PROCFS - if (acpi_device_dir(device)) - remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, - acpi_device_dir(device)); -#endif - return 0; } diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 6deafb4aa0d..953b25fb986 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -30,8 +30,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/sysdev.h> #include <asm/uaccess.h> @@ -438,84 +436,3 @@ struct thermal_cooling_device_ops processor_cooling_ops = { .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, }; - -/* /proc interface */ -#ifdef CONFIG_ACPI_PROCFS -static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = seq->private; - - if (!pr) - goto end; - - if (!pr->flags.limit) { - seq_puts(seq, "<not supported>\n"); - goto end; - } - - seq_printf(seq, "active limit: P%d:T%d\n" - "user limit: P%d:T%d\n" - "thermal limit: P%d:T%d\n", - pr->limit.state.px, pr->limit.state.tx, - pr->limit.user.px, pr->limit.user.tx, - pr->limit.thermal.px, pr->limit.thermal.tx); - - end: - return 0; -} - -static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_limit_seq_show, - PDE(inode)->data); -} - -static ssize_t acpi_processor_write_limit(struct file * file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = file->private_data; - struct acpi_processor *pr = m->private; - char limit_string[25] = { '\0' }; - int px = 0; - int tx = 0; - - - if (!pr || (count > sizeof(limit_string) - 1)) { - return -EINVAL; - } - - if (copy_from_user(limit_string, buffer, count)) { - return -EFAULT; - } - - limit_string[count] = '\0'; - - if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) { - printk(KERN_ERR PREFIX "Invalid data format\n"); - return -EINVAL; - } - - if (pr->flags.throttling) { - if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) { - printk(KERN_ERR PREFIX "Invalid tx\n"); - return -EINVAL; - } - pr->limit.user.tx = tx; - } - - result = acpi_processor_apply_limit(pr); - - return count; -} - -const struct file_operations acpi_processor_limit_fops = { - .owner = THIS_MODULE, - .open = acpi_processor_limit_open_fs, - .read = seq_read, - .write = acpi_processor_write_limit, - .llseek = seq_lseek, - .release = single_release, -}; -#endif diff --git a/drivers/acpi/processor_throttling.c 
b/drivers/acpi/processor_throttling.c index 9ade1a5b32e..730863855ed 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -1215,7 +1215,6 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) } /* proc interface */ -#ifdef CONFIG_ACPI_PROCFS static int acpi_processor_throttling_seq_show(struct seq_file *seq, void *offset) { @@ -1323,4 +1322,3 @@ const struct file_operations acpi_processor_throttling_fops = { .llseek = seq_lseek, .release = single_release, }; -#endif diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index e143203254a..cf82989ae75 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -70,10 +70,10 @@ static int acpi_sleep_prepare(u32 acpi_state) } ACPI_FLUSH_CPU_CACHE(); - acpi_enable_wakeup_device_prep(acpi_state); #endif printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", acpi_state); + acpi_enable_wakeup_devices(acpi_state); acpi_enter_sleep_state_prep(acpi_state); return 0; } @@ -119,6 +119,16 @@ static int acpi_pm_freeze(void) } /** + * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. + */ +static int acpi_pm_pre_suspend(void) +{ + acpi_pm_freeze(); + suspend_nvs_save(); + return 0; +} + +/** * __acpi_pm_prepare - Prepare the platform to enter the target state. * * If necessary, set the firmware waking vector and do arch-specific @@ -127,11 +137,9 @@ static int acpi_pm_freeze(void) static int __acpi_pm_prepare(void) { int error = acpi_sleep_prepare(acpi_target_sleep_state); - - suspend_nvs_save(); - if (error) acpi_target_sleep_state = ACPI_STATE_S0; + return error; } @@ -142,9 +150,8 @@ static int __acpi_pm_prepare(void) static int acpi_pm_prepare(void) { int error = __acpi_pm_prepare(); - if (!error) - acpi_pm_freeze(); + acpi_pm_pre_suspend(); return error; } @@ -159,7 +166,6 @@ static void acpi_pm_finish(void) { u32 acpi_state = acpi_target_sleep_state; - suspend_nvs_free(); acpi_ec_unblock_transactions(); if (acpi_state == ACPI_STATE_S0) @@ -167,7 +173,7 @@ static void acpi_pm_finish(void) printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", acpi_state); - acpi_disable_wakeup_device(acpi_state); + acpi_disable_wakeup_devices(acpi_state); acpi_leave_sleep_state(acpi_state); /* reset firmware waking vector */ @@ -181,6 +187,7 @@ static void acpi_pm_finish(void) */ static void acpi_pm_end(void) { + suspend_nvs_free(); /* * This is necessary in case acpi_pm_finish() is not called during a * failing transition to a sleep state. @@ -251,7 +258,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) } local_irq_save(flags); - acpi_enable_wakeup_device(acpi_state); switch (acpi_state) { case ACPI_STATE_S1: barrier(); @@ -297,11 +303,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) return ACPI_SUCCESS(status) ? 
0 : -EFAULT; } -static void acpi_suspend_finish(void) -{ - acpi_pm_finish(); -} - static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; @@ -323,7 +324,7 @@ static struct platform_suspend_ops acpi_suspend_ops = { .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, - .wake = acpi_suspend_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, }; @@ -336,9 +337,9 @@ static struct platform_suspend_ops acpi_suspend_ops = { static int acpi_suspend_begin_old(suspend_state_t pm_state) { int error = acpi_suspend_begin(pm_state); - if (!error) error = __acpi_pm_prepare(); + return error; } @@ -349,9 +350,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, - .prepare_late = acpi_pm_freeze, + .prepare_late = acpi_pm_pre_suspend, .enter = acpi_suspend_enter, - .wake = acpi_suspend_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; @@ -423,16 +424,6 @@ static int acpi_hibernation_begin(void) return error; } -static int acpi_hibernation_pre_snapshot(void) -{ - int error = acpi_pm_prepare(); - - if (!error) - suspend_nvs_save(); - - return error; -} - static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; @@ -441,7 +432,6 @@ static int acpi_hibernation_enter(void) ACPI_FLUSH_CPU_CACHE(); local_irq_save(flags); - acpi_enable_wakeup_device(ACPI_STATE_S4); /* This shouldn't return. If it returns, we have a problem */ status = acpi_enter_sleep_state(ACPI_STATE_S4); /* Reprogram control registers and execute _BFS */ @@ -481,7 +471,7 @@ static void acpi_pm_thaw(void) static struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, - .pre_snapshot = acpi_hibernation_pre_snapshot, + .pre_snapshot = acpi_pm_prepare, .finish = acpi_pm_finish, .prepare = acpi_pm_prepare, .enter = acpi_hibernation_enter, @@ -517,13 +507,6 @@ static int acpi_hibernation_begin_old(void) return error; } -static int acpi_hibernation_pre_snapshot_old(void) -{ - acpi_pm_freeze(); - suspend_nvs_save(); - return 0; -} - /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. 
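Editor's note: the net effect of the sleep.c changes above is easier to see as a call sequence. NVS saving and the pre-suspend freeze now live in acpi_pm_pre_suspend(), wakeup devices are enabled from acpi_sleep_prepare(), and the NVS buffer is released from acpi_pm_end(), which lets the acpi_suspend_finish() and acpi_hibernation_pre_snapshot*() wrappers go away. The standalone mock below only illustrates that ordering; the one-line bodies are paraphrases, not the real implementations (the real acpi_pm_freeze(), for instance, is not shown in this hunk).

/* Standalone ordering mock, not kernel code. */
#include <stdio.h>

static void acpi_sleep_prepare(void)  { puts("  enable wakeup devices, enter_sleep_state_prep"); }
static void acpi_pm_freeze(void)      { puts("  freeze (assumed: quiesce GPEs/EC)"); }
static void suspend_nvs_save(void)    { puts("  save NVS"); }
static void suspend_nvs_free(void)    { puts("  free NVS"); }

static void acpi_pm_pre_suspend(void) { acpi_pm_freeze(); suspend_nvs_save(); }
static void acpi_pm_prepare(void)     { acpi_sleep_prepare(); acpi_pm_pre_suspend(); }
static void acpi_pm_finish(void)      { puts("  unblock EC, disable wakeup devices, leave sleep state"); }
static void acpi_pm_end(void)         { suspend_nvs_free(); }

int main(void)
{
	puts(".prepare_late = acpi_pm_prepare");
	acpi_pm_prepare();
	puts(".enter        = acpi_suspend_enter (no wakeup-device work here any more)");
	puts(".wake         = acpi_pm_finish");
	acpi_pm_finish();
	puts(".end          = acpi_pm_end");
	acpi_pm_end();
	return 0;
}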
@@ -531,7 +514,7 @@ static int acpi_hibernation_pre_snapshot_old(void) static struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, - .pre_snapshot = acpi_hibernation_pre_snapshot_old, + .pre_snapshot = acpi_pm_pre_suspend, .prepare = acpi_pm_freeze, .finish = acpi_pm_finish, .enter = acpi_hibernation_enter, @@ -686,7 +669,6 @@ static void acpi_power_off(void) /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); - acpi_enable_wakeup_device(ACPI_STATE_S5); acpi_enter_sleep_state(ACPI_STATE_S5); } diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 25b8bd14928..d8821805c3b 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -2,9 +2,8 @@ extern u8 sleep_states[]; extern int acpi_suspend(u32 state); -extern void acpi_enable_wakeup_device_prep(u8 sleep_state); -extern void acpi_enable_wakeup_device(u8 sleep_state); -extern void acpi_disable_wakeup_device(u8 sleep_state); +extern void acpi_enable_wakeup_devices(u8 sleep_state); +extern void acpi_disable_wakeup_devices(u8 sleep_state); extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; diff --git a/drivers/acpi/system.c b/drivers/acpi/sysfs.c index 5981bd07e20..68e2e4582fa 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/sysfs.c @@ -1,51 +1,218 @@ /* - * acpi_system.c - ACPI System Driver ($Revision: 63 $) - * - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * sysfs.c - ACPI sysfs interface to userspace. 
*/ -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/slab.h> #include <linux/init.h> -#include <linux/string.h> -#include <asm/uaccess.h> - +#include <linux/kernel.h> +#include <linux/moduleparam.h> #include <acpi/acpi_drivers.h> -#define PREFIX "ACPI: " - #define _COMPONENT ACPI_SYSTEM_COMPONENT -ACPI_MODULE_NAME("system"); - -#define ACPI_SYSTEM_CLASS "system" -#define ACPI_SYSTEM_DEVICE_NAME "System" +ACPI_MODULE_NAME("sysfs"); -u32 acpi_irq_handled; -u32 acpi_irq_not_handled; +#define PREFIX "ACPI: " +#ifdef CONFIG_ACPI_DEBUG /* - * Make ACPICA version work as module param + * ACPI debug sysfs I/F, including: + * /sys/modules/acpi/parameters/debug_layer + * /sys/modules/acpi/parameters/debug_level + * /sys/modules/acpi/parameters/trace_method_name + * /sys/modules/acpi/parameters/trace_state + * /sys/modules/acpi/parameters/trace_debug_layer + * /sys/modules/acpi/parameters/trace_debug_level */ + +struct acpi_dlayer { + const char *name; + unsigned long value; +}; +struct acpi_dlevel { + const char *name; + unsigned long value; +}; +#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v } + +static const struct acpi_dlayer acpi_debug_layers[] = { + ACPI_DEBUG_INIT(ACPI_UTILITIES), + ACPI_DEBUG_INIT(ACPI_HARDWARE), + ACPI_DEBUG_INIT(ACPI_EVENTS), + ACPI_DEBUG_INIT(ACPI_TABLES), + ACPI_DEBUG_INIT(ACPI_NAMESPACE), + ACPI_DEBUG_INIT(ACPI_PARSER), + ACPI_DEBUG_INIT(ACPI_DISPATCHER), + ACPI_DEBUG_INIT(ACPI_EXECUTER), + ACPI_DEBUG_INIT(ACPI_RESOURCES), + ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER), + ACPI_DEBUG_INIT(ACPI_OS_SERVICES), + ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER), + ACPI_DEBUG_INIT(ACPI_COMPILER), + ACPI_DEBUG_INIT(ACPI_TOOLS), + + ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT), + ACPI_DEBUG_INIT(ACPI_AC_COMPONENT), + ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT), + ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT), + ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT), + ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT), + ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT), + ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT), + ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT), + ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT), + ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT), + ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT), + ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT), + ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT), +}; + +static const struct acpi_dlevel acpi_debug_levels[] = { + ACPI_DEBUG_INIT(ACPI_LV_INIT), + ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT), + ACPI_DEBUG_INIT(ACPI_LV_INFO), + + ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES), + ACPI_DEBUG_INIT(ACPI_LV_PARSE), + ACPI_DEBUG_INIT(ACPI_LV_LOAD), + ACPI_DEBUG_INIT(ACPI_LV_DISPATCH), + ACPI_DEBUG_INIT(ACPI_LV_EXEC), + ACPI_DEBUG_INIT(ACPI_LV_NAMES), + ACPI_DEBUG_INIT(ACPI_LV_OPREGION), + ACPI_DEBUG_INIT(ACPI_LV_BFIELD), + ACPI_DEBUG_INIT(ACPI_LV_TABLES), + ACPI_DEBUG_INIT(ACPI_LV_VALUES), + ACPI_DEBUG_INIT(ACPI_LV_OBJECTS), + ACPI_DEBUG_INIT(ACPI_LV_RESOURCES), + ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS), + ACPI_DEBUG_INIT(ACPI_LV_PACKAGE), + + ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS), + ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS), + ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS), + + ACPI_DEBUG_INIT(ACPI_LV_MUTEX), + ACPI_DEBUG_INIT(ACPI_LV_THREADS), + ACPI_DEBUG_INIT(ACPI_LV_IO), + ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS), + + ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE), + ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO), + ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES), + ACPI_DEBUG_INIT(ACPI_LV_EVENTS), +}; + +static int param_get_debug_layer(char *buffer, struct kernel_param *kp) +{ + int result = 0; + int i; + + result = sprintf(buffer, "%-25s\tHex SET\n", "Description"); + + for (i = 0; 
i < ARRAY_SIZE(acpi_debug_layers); i++) { + result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n", + acpi_debug_layers[i].name, + acpi_debug_layers[i].value, + (acpi_dbg_layer & acpi_debug_layers[i].value) + ? '*' : ' '); + } + result += + sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS", + ACPI_ALL_DRIVERS, + (acpi_dbg_layer & ACPI_ALL_DRIVERS) == + ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS) + == 0 ? ' ' : '-'); + result += + sprintf(buffer + result, + "--\ndebug_layer = 0x%08X ( * = enabled)\n", + acpi_dbg_layer); + + return result; +} + +static int param_get_debug_level(char *buffer, struct kernel_param *kp) +{ + int result = 0; + int i; + + result = sprintf(buffer, "%-25s\tHex SET\n", "Description"); + + for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) { + result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n", + acpi_debug_levels[i].name, + acpi_debug_levels[i].value, + (acpi_dbg_level & acpi_debug_levels[i].value) + ? '*' : ' '); + } + result += + sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n", + acpi_dbg_level); + + return result; +} + +module_param_call(debug_layer, param_set_uint, param_get_debug_layer, + &acpi_dbg_layer, 0644); +module_param_call(debug_level, param_set_uint, param_get_debug_level, + &acpi_dbg_level, 0644); + +static char trace_method_name[6]; +module_param_string(trace_method_name, trace_method_name, 6, 0644); +static unsigned int trace_debug_layer; +module_param(trace_debug_layer, uint, 0644); +static unsigned int trace_debug_level; +module_param(trace_debug_level, uint, 0644); + +static int param_set_trace_state(const char *val, struct kernel_param *kp) +{ + int result = 0; + + if (!strncmp(val, "enable", strlen("enable") - 1)) { + result = acpi_debug_trace(trace_method_name, trace_debug_level, + trace_debug_layer, 0); + if (result) + result = -EBUSY; + goto exit; + } + + if (!strncmp(val, "disable", strlen("disable") - 1)) { + int name = 0; + result = acpi_debug_trace((char *)&name, trace_debug_level, + trace_debug_layer, 0); + if (result) + result = -EBUSY; + goto exit; + } + + if (!strncmp(val, "1", 1)) { + result = acpi_debug_trace(trace_method_name, trace_debug_level, + trace_debug_layer, 1); + if (result) + result = -EBUSY; + goto exit; + } + + result = -EINVAL; +exit: + return result; +} + +static int param_get_trace_state(char *buffer, struct kernel_param *kp) +{ + if (!acpi_gbl_trace_method_name) + return sprintf(buffer, "disable"); + else { + if (acpi_gbl_trace_flags & 1) + return sprintf(buffer, "1"); + else + return sprintf(buffer, "enable"); + } + return 0; +} + +module_param_call(trace_state, param_set_trace_state, param_get_trace_state, + NULL, 0644); +#endif /* CONFIG_ACPI_DEBUG */ + +/* /sys/module/acpi/parameters/acpica_version */ static int param_get_acpica_version(char *buffer, struct kernel_param *kp) { int result; @@ -57,9 +224,12 @@ static int param_get_acpica_version(char *buffer, struct kernel_param *kp) module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444); -/* -------------------------------------------------------------------------- - FS Interface (/sys) - -------------------------------------------------------------------------- */ +/* + * ACPI table sysfs I/F: + * /sys/firmware/acpi/tables/ + * /sys/firmware/acpi/tables/dynamic/ + */ + static LIST_HEAD(acpi_table_attr_list); static struct kobject *tables_kobj; static struct kobject *dynamic_tables_kobj; @@ -86,14 +256,12 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject 
*kobj, else memcpy(name, "\0\0\0\0", 4); - status = - acpi_get_table(name, table_attr->instance, - &table_header); + status = acpi_get_table(name, table_attr->instance, &table_header); if (ACPI_FAILURE(status)) return -ENODEV; return memory_read_from_buffer(buf, count, &offset, - table_header, table_header->length); + table_header, table_header->length); } static void acpi_table_attr_init(struct acpi_table_attr *table_attr, @@ -105,7 +273,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, sysfs_attr_init(&table_attr->attr.attr); if (table_header->signature[0] != '\0') memcpy(table_attr->name, table_header->signature, - ACPI_NAME_SIZE); + ACPI_NAME_SIZE); else memcpy(table_attr->name, "NULL", 4); @@ -117,8 +285,8 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, table_attr->instance++; if (table_attr->instance > 1 || (table_attr->instance == 1 && - !acpi_get_table - (table_header->signature, 2, &header))) + !acpi_get_table + (table_header->signature, 2, &header))) sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", table_attr->instance); @@ -138,18 +306,17 @@ acpi_sysfs_table_handler(u32 event, void *table, void *context) switch (event) { case ACPI_TABLE_EVENT_LOAD: table_attr = - kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); + kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); if (!table_attr) return AE_NO_MEMORY; acpi_table_attr_init(table_attr, table); if (sysfs_create_bin_file(dynamic_tables_kobj, - &table_attr->attr)) { + &table_attr->attr)) { kfree(table_attr); return AE_ERROR; } else - list_add_tail(&table_attr->node, - &acpi_table_attr_list); + list_add_tail(&table_attr->node, &acpi_table_attr_list); break; case ACPI_TABLE_EVENT_UNLOAD: /* @@ -164,7 +331,7 @@ acpi_sysfs_table_handler(u32 event, void *table, void *context) return AE_OK; } -static int acpi_system_sysfs_init(void) +static int acpi_tables_sysfs_init(void) { struct acpi_table_attr *table_attr; struct acpi_table_header *table_header = NULL; @@ -213,14 +380,17 @@ err: } /* - * Detailed ACPI IRQ counters in /sys/firmware/acpi/interrupts/ - * See Documentation/ABI/testing/sysfs-firmware-acpi + * Detailed ACPI IRQ counters: + * /sys/firmware/acpi/interrupts/ */ +u32 acpi_irq_handled; +u32 acpi_irq_not_handled; + #define COUNT_GPE 0 -#define COUNT_SCI 1 /* acpi_irq_handled */ -#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */ -#define COUNT_ERROR 3 /* other */ +#define COUNT_SCI 1 /* acpi_irq_handled */ +#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */ +#define COUNT_ERROR 3 /* other */ #define NUM_COUNTERS_EXTRA 4 struct event_counter { @@ -237,6 +407,7 @@ static u32 acpi_gpe_count; static struct attribute_group interrupt_stats_attr_group = { .name = "interrupts", }; + static struct kobj_attribute *counter_attrs; static void delete_gpe_attr_array(void) @@ -269,8 +440,8 @@ void acpi_os_gpe_count(u32 gpe_number) if (gpe_number < num_gpes) all_counters[gpe_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. - count++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + + COUNT_ERROR].count++; return; } @@ -283,13 +454,14 @@ void acpi_os_fixed_event_count(u32 event_number) if (event_number < ACPI_NUM_FIXED_EVENTS) all_counters[num_gpes + event_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. 
- count++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + + COUNT_ERROR].count++; return; } -static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) +static int get_status(u32 index, acpi_event_status *status, + acpi_handle *handle) { int result = 0; @@ -300,7 +472,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) result = acpi_get_gpe_device(index, handle); if (result) { ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, - "Invalid GPE 0x%x\n", index)); + "Invalid GPE 0x%x\n", index)); goto end; } result = acpi_get_gpe_status(*handle, index, status); @@ -312,7 +484,7 @@ end: } static ssize_t counter_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { int index = attr - counter_attrs; int size; @@ -321,12 +493,11 @@ static ssize_t counter_show(struct kobject *kobj, int result = 0; all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count = - acpi_irq_handled; + acpi_irq_handled; all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count = - acpi_irq_not_handled; + acpi_irq_not_handled; all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count = - acpi_gpe_count; - + acpi_gpe_count; size = sprintf(buf, "%8d", all_counters[index].count); /* "gpe_all" or "sci" */ @@ -338,13 +509,13 @@ static ssize_t counter_show(struct kobject *kobj, goto end; if (!(status & ACPI_EVENT_FLAG_HANDLE)) - size += sprintf(buf + size, " invalid"); + size += sprintf(buf + size, " invalid"); else if (status & ACPI_EVENT_FLAG_ENABLED) - size += sprintf(buf + size, " enabled"); + size += sprintf(buf + size, " enabled"); else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED) - size += sprintf(buf + size, " wake_enabled"); + size += sprintf(buf + size, " wake_enabled"); else - size += sprintf(buf + size, " disabled"); + size += sprintf(buf + size, " disabled"); end: size += sprintf(buf + size, "\n"); @@ -357,7 +528,8 @@ end: * enable/disable/clear a gpe/fixed event in user space. 
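Editor's note: counter_set() below accepts the strings "enable", "disable" and "clear" (or a bare number) written to the per-event files under /sys/firmware/acpi/interrupts/, while the debug_layer/debug_level knobs earlier in this file behave like ordinary module parameters under /sys/module/acpi/parameters/. A small userspace sketch of the counter side follows; the entry name used is a placeholder, so list the directory to see what the firmware actually exposes.

/* Userspace sketch for the counter_set() interface; "gpe1A" is an
 * example name, not something this patch guarantees to exist. */
#include <stdio.h>

static int write_cmd(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(cmd, f);
	return fclose(f);
}

int main(void)
{
	/* "clear\n" acks a set event, "disable\n"/"enable\n" flip its enable
	 * bit, and a plain number overwrites the counter value itself.  The
	 * trailing newline matters: the kernel side compares whole strings. */
	write_cmd("/sys/firmware/acpi/interrupts/gpe1A", "clear\n");
	write_cmd("/sys/firmware/acpi/interrupts/gpe1A", "disable\n");
	return 0;
}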
*/ static ssize_t counter_set(struct kobject *kobj, - struct kobj_attribute *attr, const char *buf, size_t size) + struct kobj_attribute *attr, const char *buf, + size_t size) { int index = attr - counter_attrs; acpi_event_status status; @@ -381,32 +553,32 @@ static ssize_t counter_set(struct kobject *kobj, if (!(status & ACPI_EVENT_FLAG_HANDLE)) { printk(KERN_WARNING PREFIX - "Can not change Invalid GPE/Fixed Event status\n"); + "Can not change Invalid GPE/Fixed Event status\n"); return -EINVAL; } if (index < num_gpes) { if (!strcmp(buf, "disable\n") && - (status & ACPI_EVENT_FLAG_ENABLED)) + (status & ACPI_EVENT_FLAG_ENABLED)) result = acpi_disable_gpe(handle, index); else if (!strcmp(buf, "enable\n") && - !(status & ACPI_EVENT_FLAG_ENABLED)) + !(status & ACPI_EVENT_FLAG_ENABLED)) result = acpi_enable_gpe(handle, index); else if (!strcmp(buf, "clear\n") && - (status & ACPI_EVENT_FLAG_SET)) + (status & ACPI_EVENT_FLAG_SET)) result = acpi_clear_gpe(handle, index); else all_counters[index].count = strtoul(buf, NULL, 0); } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { int event = index - num_gpes; if (!strcmp(buf, "disable\n") && - (status & ACPI_EVENT_FLAG_ENABLED)) + (status & ACPI_EVENT_FLAG_ENABLED)) result = acpi_disable_event(event, ACPI_NOT_ISR); else if (!strcmp(buf, "enable\n") && - !(status & ACPI_EVENT_FLAG_ENABLED)) + !(status & ACPI_EVENT_FLAG_ENABLED)) result = acpi_enable_event(event, ACPI_NOT_ISR); else if (!strcmp(buf, "clear\n") && - (status & ACPI_EVENT_FLAG_SET)) + (status & ACPI_EVENT_FLAG_SET)) result = acpi_clear_event(event); else all_counters[index].count = strtoul(buf, NULL, 0); @@ -430,17 +602,17 @@ void acpi_irq_stats_init(void) num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), - GFP_KERNEL); + GFP_KERNEL); if (all_attrs == NULL) return; all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), - GFP_KERNEL); + GFP_KERNEL); if (all_counters == NULL) goto fail; counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), - GFP_KERNEL); + GFP_KERNEL); if (counter_attrs == NULL) goto fail; @@ -503,135 +675,11 @@ static void __exit interrupt_stats_exit(void) return; } -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ -#ifdef CONFIG_ACPI_PROCFS -#define ACPI_SYSTEM_FILE_INFO "info" -#define ACPI_SYSTEM_FILE_EVENT "event" -#define ACPI_SYSTEM_FILE_DSDT "dsdt" -#define ACPI_SYSTEM_FILE_FADT "fadt" - -static int acpi_system_read_info(struct seq_file *seq, void *offset) -{ - - seq_printf(seq, "version: %x\n", ACPI_CA_VERSION); - return 0; -} - -static int acpi_system_info_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_system_read_info, PDE(inode)->data); -} - -static const struct file_operations acpi_system_info_ops = { - .owner = THIS_MODULE, - .open = acpi_system_info_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, - loff_t *); - -static const struct file_operations acpi_system_dsdt_ops = { - .owner = THIS_MODULE, - .read = acpi_system_read_dsdt, -}; - -static ssize_t -acpi_system_read_dsdt(struct file *file, - char __user * buffer, size_t count, loff_t * ppos) -{ - acpi_status status = AE_OK; - struct acpi_table_header *dsdt = NULL; - ssize_t res; - - status = 
acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt); - if (ACPI_FAILURE(status)) - return -ENODEV; - - res = simple_read_from_buffer(buffer, count, ppos, dsdt, dsdt->length); - - return res; -} - -static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t, - loff_t *); - -static const struct file_operations acpi_system_fadt_ops = { - .owner = THIS_MODULE, - .read = acpi_system_read_fadt, -}; - -static ssize_t -acpi_system_read_fadt(struct file *file, - char __user * buffer, size_t count, loff_t * ppos) -{ - acpi_status status = AE_OK; - struct acpi_table_header *fadt = NULL; - ssize_t res; - - status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt); - if (ACPI_FAILURE(status)) - return -ENODEV; - - res = simple_read_from_buffer(buffer, count, ppos, fadt, fadt->length); - - return res; -} - -static int acpi_system_procfs_init(void) -{ - struct proc_dir_entry *entry; - int error = 0; - - /* 'info' [R] */ - entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir, - &acpi_system_info_ops); - if (!entry) - goto Error; - - /* 'dsdt' [R] */ - entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir, - &acpi_system_dsdt_ops); - if (!entry) - goto Error; - - /* 'fadt' [R] */ - entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir, - &acpi_system_fadt_ops); - if (!entry) - goto Error; - - Done: - return error; - - Error: - remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir); - remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir); - remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir); - - error = -EFAULT; - goto Done; -} -#else -static int acpi_system_procfs_init(void) -{ - return 0; -} -#endif - -int __init acpi_system_init(void) +int __init acpi_sysfs_init(void) { int result; - result = acpi_system_procfs_init(); - if (result) - return result; - - result = acpi_system_sysfs_init(); + result = acpi_tables_sysfs_init(); return result; } diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index efad1f33aeb..2f8f17131d9 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -37,10 +37,14 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> + +#ifdef CONFIG_ACPI_PROCFS #include <linux/proc_fs.h> +#include <linux/seq_file.h> +#endif + #include <linux/jiffies.h> #include <linux/kmod.h> -#include <linux/seq_file.h> #include <linux/reboot.h> #include <linux/device.h> #include <asm/uaccess.h> @@ -102,16 +106,6 @@ static int acpi_thermal_add(struct acpi_device *device); static int acpi_thermal_remove(struct acpi_device *device, int type); static int acpi_thermal_resume(struct acpi_device *device); static void acpi_thermal_notify(struct acpi_device *device, u32 event); -static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); -static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); -static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); -static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file); -static ssize_t acpi_thermal_write_cooling_mode(struct file *, - const char __user *, size_t, - loff_t *); -static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file); -static ssize_t acpi_thermal_write_polling(struct file *, const char __user *, - size_t, loff_t *); static const struct acpi_device_id thermal_device_ids[] = { {ACPI_THERMAL_HID, 0}, @@ -201,6 +195,18 @@ struct acpi_thermal { struct mutex lock; }; +#ifdef CONFIG_ACPI_PROCFS +static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); +static int 
acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); +static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); +static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file); +static ssize_t acpi_thermal_write_cooling_mode(struct file *, + const char __user *, size_t, + loff_t *); +static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file); +static ssize_t acpi_thermal_write_polling(struct file *, const char __user *, + size_t, loff_t *); + static const struct file_operations acpi_thermal_state_fops = { .owner = THIS_MODULE, .open = acpi_thermal_state_open_fs, @@ -242,6 +248,7 @@ static const struct file_operations acpi_thermal_polling_fops = { .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_ACPI_PROCFS*/ /* -------------------------------------------------------------------------- Thermal Zone Management @@ -287,26 +294,6 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) return 0; } -static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds) -{ - - if (!tz) - return -EINVAL; - - tz->polling_frequency = seconds * 10; /* Convert value to deci-seconds */ - - tz->thermal_zone->polling_delay = seconds * 1000; - - if (tz->tz_enabled) - thermal_zone_device_update(tz->thermal_zone); - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Polling frequency set to %lu seconds\n", - tz->polling_frequency/10)); - - return 0; -} - static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode) { acpi_status status = AE_OK; @@ -973,7 +960,7 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ - +#ifdef CONFIG_ACPI_PROCFS static struct proc_dir_entry *acpi_thermal_dir; static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset) @@ -1187,6 +1174,26 @@ static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file) PDE(inode)->data); } +static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds) +{ + if (!tz) + return -EINVAL; + + /* Convert value to deci-seconds */ + tz->polling_frequency = seconds * 10; + + tz->thermal_zone->polling_delay = seconds * 1000; + + if (tz->tz_enabled) + thermal_zone_device_update(tz->thermal_zone); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Polling frequency set to %lu seconds\n", + tz->polling_frequency/10)); + + return 0; +} + static ssize_t acpi_thermal_write_polling(struct file *file, const char __user * buffer, @@ -1295,7 +1302,13 @@ static int acpi_thermal_remove_fs(struct acpi_device *device) return 0; } - +#else +static inline int acpi_thermal_add_fs(struct acpi_device *device) { return 0; } +static inline int acpi_thermal_remove_fs(struct acpi_device *device) +{ + return 0; +} +#endif /* CONFIG_ACPI_PROCFS */ /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -1566,13 +1579,18 @@ static int __init acpi_thermal_init(void) printk(KERN_NOTICE "ACPI: thermal control disabled\n"); return -ENODEV; } + +#ifdef CONFIG_ACPI_PROCFS acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir); if (!acpi_thermal_dir) return -ENODEV; +#endif result = acpi_bus_register_driver(&acpi_thermal_driver); if (result < 0) { +#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_THERMAL_CLASS, 
acpi_root_dir); +#endif return -ENODEV; } @@ -1584,7 +1602,9 @@ static void __exit acpi_thermal_exit(void) acpi_bus_unregister_driver(&acpi_thermal_driver); +#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir); +#endif return; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 9865d46f49a..67dec0c675a 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -152,7 +152,9 @@ struct acpi_video_bus { struct acpi_video_bus_flags flags; struct list_head video_device_list; struct mutex device_list_lock; /* protects video_device_list */ +#ifdef CONFIG_ACPI_PROCFS struct proc_dir_entry *dir; +#endif struct input_dev *input; char phys[32]; /* for input device */ struct notifier_block pm_nb; @@ -208,6 +210,7 @@ struct acpi_video_device { struct output_device *output_dev; }; +#ifdef CONFIG_ACPI_PROCFS /* bus */ static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file); static const struct file_operations acpi_video_bus_info_fops = { @@ -307,6 +310,7 @@ static const struct file_operations acpi_video_device_EDID_fops = { .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_ACPI_PROCFS */ static const char device_decode[][30] = { "motherboard VGA device", @@ -450,16 +454,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = { /* device */ static int -acpi_video_device_query(struct acpi_video_device *device, unsigned long long *state) -{ - int status; - - status = acpi_evaluate_integer(device->dev->handle, "_DGS", NULL, state); - - return status; -} - -static int acpi_video_device_get_state(struct acpi_video_device *device, unsigned long long *state) { @@ -698,46 +692,6 @@ acpi_video_device_EDID(struct acpi_video_device *device, /* bus */ -static int -acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option) -{ - int status; - unsigned long long tmp; - union acpi_object arg0 = { ACPI_TYPE_INTEGER }; - struct acpi_object_list args = { 1, &arg0 }; - - - arg0.integer.value = option; - - status = acpi_evaluate_integer(video->device->handle, "_SPD", &args, &tmp); - if (ACPI_SUCCESS(status)) - status = tmp ? 
(-EINVAL) : (AE_OK); - - return status; -} - -static int -acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id) -{ - int status; - - status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id); - - return status; -} - -static int -acpi_video_bus_POST_options(struct acpi_video_bus *video, - unsigned long long *options) -{ - int status; - - status = acpi_evaluate_integer(video->device->handle, "_VPO", NULL, options); - *options &= 3; - - return status; -} - /* * Arg: * video : video bus device pointer @@ -1159,6 +1113,7 @@ static int acpi_video_bus_check(struct acpi_video_bus *video) /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ +#ifdef CONFIG_ACPI_PROCFS static struct proc_dir_entry *acpi_video_dir; @@ -1198,6 +1153,18 @@ acpi_video_device_info_open_fs(struct inode *inode, struct file *file) PDE(inode)->data); } +static int +acpi_video_device_query(struct acpi_video_device *device, + unsigned long long *state) +{ + int status; + + status = acpi_evaluate_integer(device->dev->handle, "_DGS", + NULL, state); + + return status; +} + static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset) { int status; @@ -1492,6 +1459,19 @@ static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file) return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data); } +static int +acpi_video_bus_POST_options(struct acpi_video_bus *video, + unsigned long long *options) +{ + int status; + + status = acpi_evaluate_integer(video->device->handle, "_VPO", + NULL, options); + *options &= 3; + + return status; +} + static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset) { struct acpi_video_bus *video = seq->private; @@ -1530,6 +1510,16 @@ acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file) PDE(inode)->data); } +static int +acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id) +{ + int status; + + status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id); + + return status; +} + static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset) { struct acpi_video_bus *video = seq->private; @@ -1572,6 +1562,25 @@ static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file) return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data); } +static int +acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option) +{ + int status; + unsigned long long tmp; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { 1, &arg0 }; + + + arg0.integer.value = option; + + status = acpi_evaluate_integer(video->device->handle, "_SPD", + &args, &tmp); + if (ACPI_SUCCESS(status)) + status = tmp ? 
(-EINVAL) : (AE_OK); + + return status; +} + static ssize_t acpi_video_bus_write_POST(struct file *file, const char __user * buffer, @@ -1722,6 +1731,24 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device) return 0; } +#else +static inline int acpi_video_device_add_fs(struct acpi_device *device) +{ + return 0; +} +static inline int acpi_video_device_remove_fs(struct acpi_device *device) +{ + return 0; +} +static inline int acpi_video_bus_add_fs(struct acpi_device *device) +{ + return 0; +} +static inline int acpi_video_bus_remove_fs(struct acpi_device *device) +{ + return 0; +} +#endif /* CONFIG_ACPI_PROCFS */ /* -------------------------------------------------------------------------- Driver Interface @@ -2140,7 +2167,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, status = acpi_video_bus_get_one_device(dev, video); if (ACPI_FAILURE(status)) { printk(KERN_WARNING PREFIX - "Cant attach device"); + "Cant attach device\n"); continue; } } @@ -2150,19 +2177,19 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, static int acpi_video_bus_put_one_device(struct acpi_video_device *device) { acpi_status status; - struct acpi_video_bus *video; - if (!device || !device->video) return -ENOENT; - video = device->video; - acpi_video_device_remove_fs(device->dev); status = acpi_remove_notify_handler(device->dev->handle, ACPI_DEVICE_NOTIFY, acpi_video_device_notify); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING PREFIX + "Cant remove video notify handler\n"); + } if (device->backlight) { sysfs_remove_link(&device->backlight->dev.kobj, "device"); backlight_device_unregister(device->backlight); @@ -2557,9 +2584,11 @@ int acpi_video_register(void) return 0; } +#ifdef CONFIG_ACPI_PROCFS acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir); if (!acpi_video_dir) return -ENODEV; +#endif result = acpi_bus_register_driver(&acpi_video_bus); if (result < 0) { @@ -2588,7 +2617,9 @@ void acpi_video_unregister(void) } acpi_bus_unregister_driver(&acpi_video_bus); +#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir); +#endif register_count = 0; diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index c80537bc323..f62a50c3ed3 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -21,46 +21,18 @@ ACPI_MODULE_NAME("wakeup_devices") /** - * acpi_enable_wakeup_device_prep - Prepare wake-up devices. + * acpi_enable_wakeup_devices - Enable wake-up device GPEs. * @sleep_state: ACPI system sleep state. * - * Enable all wake-up devices' power, unless the requested system sleep state is - * too deep. + * Enable wakeup device power of devices with the state.enable flag set and set + * the wakeup enable mask bits in the GPE registers that correspond to wakeup + * devices. */ -void acpi_enable_wakeup_device_prep(u8 sleep_state) +void acpi_enable_wakeup_devices(u8 sleep_state) { struct list_head *node, *next; list_for_each_safe(node, next, &acpi_wakeup_device_list) { - struct acpi_device *dev = container_of(node, - struct acpi_device, - wakeup_list); - - if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled - || (sleep_state > (u32) dev->wakeup.sleep_state)) - continue; - - acpi_enable_wakeup_device_power(dev, sleep_state); - } -} - -/** - * acpi_enable_wakeup_device - Enable wake-up device GPEs. - * @sleep_state: ACPI system sleep state. - * - * Enable all wake-up devices' GPEs, with the assumption that - * acpi_disable_all_gpes() was executed before, so we don't need to disable any - * GPEs here. 
- */ -void acpi_enable_wakeup_device(u8 sleep_state) -{ - struct list_head *node, *next; - - /* - * Caution: this routine must be invoked when interrupt is disabled - * Refer ACPI2.0: P212 - */ - list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); @@ -69,6 +41,9 @@ void acpi_enable_wakeup_device(u8 sleep_state) || sleep_state > (u32) dev->wakeup.sleep_state) continue; + if (dev->wakeup.state.enabled) + acpi_enable_wakeup_device_power(dev, sleep_state); + /* The wake-up power should have been enabled already. */ acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_ENABLE); @@ -76,13 +51,10 @@ void acpi_enable_wakeup_device(u8 sleep_state) } /** - * acpi_disable_wakeup_device - Disable devices' wakeup capability. + * acpi_disable_wakeup_devices - Disable devices' wakeup capability. * @sleep_state: ACPI system sleep state. - * - * This function only affects devices with wakeup.state.enabled set, which means - * that it reverses the changes made by acpi_enable_wakeup_device_prep(). */ -void acpi_disable_wakeup_device(u8 sleep_state) +void acpi_disable_wakeup_devices(u8 sleep_state) { struct list_head *node, *next; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 65e3e270837..11ec911016c 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF config PATA_WINBOND_VLB tristate "Winbond W83759A VLB PATA support (Experimental)" depends on ISA && EXPERIMENTAL + select PATA_LEGACY help Support for the Winbond W83759A controller on Vesa Local Bus systems. diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 158eaa961b1..d5df04a395c 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI) += pata_qdi.o obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o -obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o obj-$(CONFIG_PATA_PXA) += pata_pxa.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index fe75d8befc3..013727b2041 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -60,6 +60,7 @@ enum board_ids { board_ahci, board_ahci_ign_iferr, board_ahci_nosntf, + board_ahci_yes_fbs, /* board IDs for specific chipsets in alphabetical order */ board_ahci_mcp65, @@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_yes_fbs] = + { + AHCI_HFLAGS (AHCI_HFLAG_YES_FBS), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, /* by chipsets */ [board_ahci_mcp65] = { @@ -362,6 +371,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Marvell */ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ + { PCI_DEVICE(0x1b4b, 0x9123), + .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 7113c572447..474427b6f99 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -209,6 +209,7 @@ enum { link offline */ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ + AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ /* ap->flags bits */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 81e772a94d5..666850d31df 
100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev, cap &= ~HOST_CAP_SNTF; } + if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { + dev_printk(KERN_INFO, dev, + "controller can do FBS, turning on CAP_FBS\n"); + cap |= HOST_CAP_FBS; + } + if (force_port_map && port_map != force_port_map) { dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n", port_map, force_port_map); @@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap) u32 cmd = readl(port_mmio + PORT_CMD); if (cmd & PORT_CMD_FBSCP) pp->fbs_supported = true; - else + else if (hpriv->flags & AHCI_HFLAG_YES_FBS) { + dev_printk(KERN_INFO, dev, + "port %d can do FBS, forcing FBSCP\n", + ap->port_no); + pp->fbs_supported = true; + } else dev_printk(KERN_WARNING, dev, - "The port is not capable of FBS\n"); + "port %d is not capable of FBS\n", + ap->port_no); } if (pp->fbs_supported) { diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 7b5eea7e01d..8b5ea399a4f 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -145,12 +145,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, struct ata_eh_info *ehi = &ap->link.eh_info; int wait = 0; unsigned long flags; - acpi_handle handle; - - if (dev) - handle = dev->acpi_handle; - else - handle = ap->acpi_handle; spin_lock_irqsave(ap->lock, flags); /* diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 7ef7c4f216f..c035b3d041e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc) qc->flags |= ATA_QCFLAG_ACTIVE; ap->qc_active |= 1 << qc->tag; - /* We guarantee to LLDs that they will have at least one + /* + * We guarantee to LLDs that they will have at least one * non-zero sg if the command is a data command. 
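Editor's note: AHCI_HFLAG_YES_FBS, introduced above for the Marvell 88se9128, makes ahci_save_initial_config() turn on HOST_CAP_FBS and ahci_port_start() treat ports as FBS-capable even when the hardware does not advertise it. Hooking another controller to the quirk only takes a PCI table entry; the fragment below is a hypothetical sketch with an invented device ID, shown purely for the pattern (the real patch adds 0x1b4b:0x9123 to ahci_pci_tbl[]).

/* Hypothetical sketch only: 0x9999 is a made-up device ID. */
static const struct pci_device_id example_yes_fbs_ids[] = {
	{ PCI_DEVICE(0x1b4b, 0x9999),		/* placeholder Marvell part */
	  .driver_data = board_ahci_yes_fbs },	/* forces FBS cap + FBSCP */
	{ }
};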
*/ - BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); + if (WARN_ON_ONCE(ata_is_data(prot) && + (!qc->sg || !qc->n_elem || !qc->nbytes))) + goto sys_err; if (ata_is_dma(prot) || (ata_is_pio(prot) && (ap->flags & ATA_FLAG_PIO_DMA))) if (ata_sg_setup(qc)) - goto sg_err; + goto sys_err; /* if device is sleeping, schedule reset and abort the link */ if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { @@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) goto err; return; -sg_err: +sys_err: qc->err_mask |= AC_ERR_SYSTEM; err: ata_qc_complete(qc); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 674c1436491..3b82d8ef76f 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2735,10 +2735,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - /* see ata_dma_blacklisted() */ - BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) && - qc->tf.protocol == ATAPI_PROT_DMA); - /* defer PIO handling to sff_qc_issue */ if (!ata_is_dma(qc->tf.protocol)) return ata_sff_qc_issue(qc); diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 9f5da1c7454..905ff76d3cb 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m if (pair) { struct ata_timing tp; - ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); - if (pair->dma_mode) { - ata_timing_compute(pair, pair->dma_mode, - &tp, T, 0); - ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP); - } } } diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 9df1ff7e1ea..eaf194138f2 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -44,6 +44,9 @@ * Specific support is included for the ht6560a/ht6560b/opti82c611a/ * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A * + * Support for the Winbond 83759A when operating in advanced mode. + * Multichip mode is not currently supported. + * * Use the autospeed and pio_mask options with: * Appian ADI/2 aka CLPD7220 or AIC25VL01. 
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with @@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */ static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */ static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */ static int qdi; /* Set to probe QDI controllers */ -static int winbond; /* Set to probe Winbond controllers, - give I/O port if non standard */ static int autospeed; /* Chip present which snoops speed changes */ static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ +#ifdef PATA_WINBOND_VLB_MODULE +static int winbond = 1; /* Set to probe Winbond controllers, + give I/O port if non standard */ +#else +static int winbond; /* Set to probe Winbond controllers, + give I/O port if non standard */ +#endif + /** * legacy_probe_add - Add interface to probe list * @port: Controller port @@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for legacy ATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("pata_winbond"); module_param(probe_all, int, 0); module_param(autospeed, int, 0); @@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0); module_param(opti82c611a, int, 0); module_param(opti82c46x, int, 0); module_param(qdi, int, 0); +module_param(winbond, int, 0); module_param(pio_mask, int, 0); module_param(iordy_mask, int, 0); diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c deleted file mode 100644 index 6d8619b6f67..00000000000 --- a/drivers/ata/pata_winbond.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * pata_winbond.c - Winbond VLB ATA controllers - * (C) 2006 Red Hat - * - * Support for the Winbond 83759A when operating in advanced mode. - * Multichip mode is not currently supported. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/blkdev.h> -#include <linux/delay.h> -#include <scsi/scsi_host.h> -#include <linux/libata.h> -#include <linux/platform_device.h> - -#define DRV_NAME "pata_winbond" -#define DRV_VERSION "0.0.3" - -#define NR_HOST 4 /* Two winbond controllers, two channels each */ - -struct winbond_data { - unsigned long config; - struct platform_device *platform_dev; -}; - -static struct ata_host *winbond_host[NR_HOST]; -static struct winbond_data winbond_data[NR_HOST]; -static int nr_winbond_host; - -#ifdef MODULE -static int probe_winbond = 1; -#else -static int probe_winbond; -#endif - -static DEFINE_SPINLOCK(winbond_lock); - -static void winbond_writecfg(unsigned long port, u8 reg, u8 val) -{ - unsigned long flags; - spin_lock_irqsave(&winbond_lock, flags); - outb(reg, port + 0x01); - outb(val, port + 0x02); - spin_unlock_irqrestore(&winbond_lock, flags); -} - -static u8 winbond_readcfg(unsigned long port, u8 reg) -{ - u8 val; - - unsigned long flags; - spin_lock_irqsave(&winbond_lock, flags); - outb(reg, port + 0x01); - val = inb(port + 0x02); - spin_unlock_irqrestore(&winbond_lock, flags); - - return val; -} - -static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev) -{ - struct ata_timing t; - struct winbond_data *winbond = ap->host->private_data; - int active, recovery; - u8 reg; - int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2); - - reg = winbond_readcfg(winbond->config, 0x81); - - /* Get the timing data in cycles */ - if (reg & 0x40) /* Fast VLB bus, assume 50MHz */ - ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); - else - ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); - - active = (clamp_val(t.active, 3, 17) - 1) & 0x0F; - recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F; - timing = (active << 4) | recovery; - winbond_writecfg(winbond->config, timing, reg); - - /* Load the setup timing */ - - reg = 0x35; - if (adev->class != ATA_DEV_ATA) - reg |= 0x08; /* FIFO off */ - if (!ata_pio_need_iordy(adev)) - reg |= 0x02; /* IORDY off */ - reg |= (clamp_val(t.setup, 0, 3) << 6); - winbond_writecfg(winbond->config, timing + 1, reg); -} - - -static unsigned int winbond_data_xfer(struct ata_device *dev, - unsigned char *buf, unsigned int buflen, int rw) -{ - struct ata_port *ap = dev->link->ap; - int slop = buflen & 3; - - if (ata_id_has_dword_io(dev->id)) { - if (rw == READ) - ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); - else - iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); - - if (unlikely(slop)) { - __le32 pad; - if (rw == READ) { - pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); - memcpy(buf + buflen - slop, &pad, slop); - } else { - memcpy(&pad, buf + buflen - slop, slop); - iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); - } - buflen += 4 - slop; - } - } else - buflen = ata_sff_data_xfer(dev, buf, buflen, rw); - - return buflen; -} - -static struct scsi_host_template winbond_sht = { - ATA_PIO_SHT(DRV_NAME), -}; - -static struct ata_port_operations winbond_port_ops = { - .inherits = &ata_sff_port_ops, - .sff_data_xfer = winbond_data_xfer, - .cable_detect = ata_cable_40wire, - .set_piomode = winbond_set_piomode, -}; - -/** - * winbond_init_one - attach a winbond interface - * @type: Type to display - * @io: I/O port start - * @irq: interrupt line - * @fast: True if on a > 33Mhz VLB - * - * Register a VLB bus IDE interface. Such interfaces are PIO and we - * assume do not support IRQ sharing. 
- */ - -static __init int winbond_init_one(unsigned long port) -{ - struct platform_device *pdev; - u8 reg; - int i, rc; - - reg = winbond_readcfg(port, 0x81); - reg |= 0x80; /* jumpered mode off */ - winbond_writecfg(port, 0x81, reg); - reg = winbond_readcfg(port, 0x83); - reg |= 0xF0; /* local control */ - winbond_writecfg(port, 0x83, reg); - reg = winbond_readcfg(port, 0x85); - reg |= 0xF0; /* programmable timing */ - winbond_writecfg(port, 0x85, reg); - - reg = winbond_readcfg(port, 0x81); - - if (!(reg & 0x03)) /* Disabled */ - return -ENODEV; - - for (i = 0; i < 2 ; i ++) { - unsigned long cmd_port = 0x1F0 - (0x80 * i); - unsigned long ctl_port = cmd_port + 0x206; - struct ata_host *host; - struct ata_port *ap; - void __iomem *cmd_addr, *ctl_addr; - - if (!(reg & (1 << i))) - continue; - - pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - rc = -ENOMEM; - host = ata_host_alloc(&pdev->dev, 1); - if (!host) - goto err_unregister; - ap = host->ports[0]; - - rc = -ENOMEM; - cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8); - ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1); - if (!cmd_addr || !ctl_addr) - goto err_unregister; - - ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port); - - ap->ops = &winbond_port_ops; - ap->pio_mask = ATA_PIO4; - ap->flags |= ATA_FLAG_SLAVE_POSS; - ap->ioaddr.cmd_addr = cmd_addr; - ap->ioaddr.altstatus_addr = ctl_addr; - ap->ioaddr.ctl_addr = ctl_addr; - ata_sff_std_ports(&ap->ioaddr); - - /* hook in a private data structure per channel */ - host->private_data = &winbond_data[nr_winbond_host]; - winbond_data[nr_winbond_host].config = port; - winbond_data[nr_winbond_host].platform_dev = pdev; - - /* activate */ - rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0, - &winbond_sht); - if (rc) - goto err_unregister; - - winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev); - } - - return 0; - - err_unregister: - platform_device_unregister(pdev); - return rc; -} - -/** - * winbond_init - attach winbond interfaces - * - * Attach winbond IDE interfaces by scanning the ports it may occupy. 
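The probe loop in winbond_init() that follows uses the classic reserve-then-probe idiom; a generic sketch, where example_init_one() is a hypothetical hardware setup helper, looks like this:

#include <linux/ioport.h>

static int example_init_one(unsigned long port);	/* hypothetical */

static int __init example_probe_at(unsigned long port)
{
	int ret;

	if (!request_region(port, 2, "example_vlb"))
		return -EBUSY;			/* ports already claimed */

	ret = example_init_one(port);		/* hypothetical hardware setup */
	if (ret)
		release_region(port, 2);	/* nothing found, give ports back */

	return ret;
}

On success the region stays reserved for the life of the driver and is released again on teardown, mirroring winbond_exit() below.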
- */ - -static __init int winbond_init(void) -{ - static const unsigned long config[2] = { 0x130, 0x1B0 }; - - int ct = 0; - int i; - - if (probe_winbond == 0) - return -ENODEV; - - /* - * Check both base addresses - */ - - for (i = 0; i < 2; i++) { - if (probe_winbond & (1<<i)) { - int ret = 0; - unsigned long port = config[i]; - - if (request_region(port, 2, "pata_winbond")) { - ret = winbond_init_one(port); - if (ret <= 0) - release_region(port, 2); - else ct+= ret; - } - } - } - if (ct != 0) - return 0; - return -ENODEV; -} - -static __exit void winbond_exit(void) -{ - int i; - - for (i = 0; i < nr_winbond_host; i++) { - ata_host_detach(winbond_host[i]); - release_region(winbond_data[i].config, 2); - platform_device_unregister(winbond_data[i].platform_dev); - } -} - -MODULE_AUTHOR("Alan Cox"); -MODULE_DESCRIPTION("low-level driver for Winbond VL ATA"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -module_init(winbond_init); -module_exit(winbond_exit); - -module_param(probe_winbond, int, 0); - diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index ea24c1e51be..6cf57c5c2b5 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) { struct scatterlist *sg = qc->sg; struct ata_port *ap = qc->ap; - u32 dma_chan; + int dma_chan; struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); int err; @@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = { }, }; -static int sata_dwc_probe(struct of_device *ofdev, +static int sata_dwc_probe(struct platform_device *ofdev, const struct of_device_id *match) { struct sata_dwc_device *hsdev; @@ -1702,7 +1702,7 @@ error_out: return err; } -static int sata_dwc_remove(struct of_device *ofdev) +static int sata_dwc_remove(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 9463c71dd38..81982594a01 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. */ -static void mv_bmdma_stop(struct ata_queued_cmd *qc) +static void mv_bmdma_stop_ap(struct ata_port *ap) { - struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); u32 cmd; /* clear start/stop bit */ cmd = readl(port_mmio + BMDMA_CMD); - cmd &= ~ATA_DMA_START; - writelfl(cmd, port_mmio + BMDMA_CMD); + if (cmd & ATA_DMA_START) { + cmd &= ~ATA_DMA_START; + writelfl(cmd, port_mmio + BMDMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); + } +} - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_dma_pause(ap); +static void mv_bmdma_stop(struct ata_queued_cmd *qc) +{ + mv_bmdma_stop_ap(qc->ap); } /** @@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap) reg = readl(port_mmio + BMDMA_STATUS); if (reg & ATA_DMA_ACTIVE) status = ATA_DMA_ACTIVE; - else + else if (reg & ATA_DMA_ERR) status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; + else { + /* + * Just because DMA_ACTIVE is 0 (DMA completed), + * this does _not_ mean the device is "done". + * So we should not yet be signalling ATA_DMA_INTR + * in some cases. Eg. DSM/TRIM, and perhaps others. 
+ */ + mv_bmdma_stop_ap(ap); + if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) + status = 0; + else + status = ATA_DMA_INTR; + } return status; } @@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) switch (tf->protocol) { case ATA_PROT_DMA: + if (tf->command == ATA_CMD_DSM) + return; + /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: @@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) return; + if (tf->command == ATA_CMD_DSM) + return; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) @@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: + if (qc->tf.command == ATA_CMD_DSM) { + if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ + return AC_ERR_OTHER; + break; /* use bmdma for this */ + } + /* fall thru */ case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index c8a44f5e058..40af43ebd92 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p, out: if (retval) { release_firmware(firmware); - firmware_p = NULL; + *firmware_p = NULL; } return retval; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ac1b682edec..ab735a605cf 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev, char *type; int len; /* no unplug has been done: do not hook devices != xen vbds */ - if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) { + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { int major; if (!VDEV_IS_EXTENDED(vdevice)) diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 2982b3ee946..057413bb16e 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -94,6 +94,7 @@ #include <linux/hdreg.h> #include <linux/platform_device.h> #if defined(CONFIG_OF) +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index ddf5def1b0d..eab58db5f91 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,6 +12,7 @@ #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" +#include <linux/intel-gtt.h> #include "intel-gtt.c" @@ -815,9 +816,19 @@ static const struct intel_driver_description { "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, "Sandybridge", NULL, &intel_gen6_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + 
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, "Sandybridge", NULL, &intel_gen6_driver }, { 0, 0, NULL, NULL, NULL } }; @@ -825,7 +836,8 @@ static const struct intel_driver_description { static int __devinit intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge) { - int i; + int i, mask; + bridge->driver = NULL; for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { @@ -845,14 +857,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); - if (bridge->driver->mask_memory == intel_i965_mask_memory) { - if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) - dev_err(&intel_private.pcidev->dev, - "set gfx device dma mask 36bit failed!\n"); - else - pci_set_consistent_dma_mask(intel_private.pcidev, - DMA_BIT_MASK(36)); - } + if (bridge->driver->mask_memory == intel_gen6_mask_memory) + mask = 40; + else if (bridge->driver->mask_memory == intel_i965_mask_memory) + mask = 36; + else + mask = 32; + + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); return 1; } @@ -1036,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), { } }; diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c05e3e51826..ee189c74d34 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -1,6 +1,8 @@ /* * Common Intel AGPGART and GTT definitions. 
*/ +#ifndef _INTEL_AGP_H +#define _INTEL_AGP_H /* Intel registers */ #define INTEL_APSIZE 0xb4 @@ -200,10 +202,16 @@ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A /* cover 915 and 945 variants */ #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ @@ -230,7 +238,8 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB) #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ @@ -243,3 +252,5 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ IS_SNB) + +#endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index d22ffb811bf..75e0a349788 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] = .type = INTEL_AGP_CACHED_MEMORY} }; +#define INTEL_AGP_UNCACHED_MEMORY 0 +#define INTEL_AGP_CACHED_MEMORY_LLC 1 +#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2 +#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3 +#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4 + +static struct gatt_mask intel_gen6_masks[] = +{ + {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED, + .type = INTEL_AGP_UNCACHED_MEMORY }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC, + .type = INTEL_AGP_CACHED_MEMORY_LLC }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT, + .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC, + .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT, + .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT }, +}; + static struct _intel_private { struct pci_dev *pcidev; /* device one */ u8 __iomem *registers; @@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem, off_t pg_start, int mask_type) { int i, j; - u32 cache_bits = 0; - - if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) - { - cache_bits = GEN6_PTE_LLC_MLC; - } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, @@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, return 0; } +static int 
intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge, + int type) +{ + unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT; + + if (type_mask == AGP_USER_UNCACHED_MEMORY) + return INTEL_AGP_UNCACHED_MEMORY; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) + return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT : + INTEL_AGP_CACHED_MEMORY_LLC_MLC; + else /* set 'normal'/'cached' to LLC by default */ + return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT : + INTEL_AGP_CACHED_MEMORY_LLC; +} + + static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void) gtt_entries = 0; break; } - } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { + } else if (IS_SNB) { /* * SandyBridge has new memory control reg at 0x50.w */ @@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void) intel_i915_setup_chipset_flush(); } - if (intel_private.ifp_resource.start) { + if (intel_private.ifp_resource.start) intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); - if (!intel_private.i9xx_flush_page) - dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); - } + if (!intel_private.i9xx_flush_page) + dev_err(&intel_private.pcidev->dev, + "can't ioremap flush page - no chipset flushing\n"); } static int intel_i9xx_configure(void) @@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && + if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && mask_type != INTEL_AGP_CACHED_MEMORY) goto out_err; @@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type) { - /* Shift high bits down */ - addr |= (addr >> 28) & 0xff; + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; /* Type checking must be done elsewhere */ return addr | bridge->driver->masks[type].mask; @@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) break; case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: + case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB: *gtt_offset = MB(2); pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); @@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .fetch_size = intel_i9xx_fetch_size, .cleanup = intel_i915_cleanup, .mask_memory = intel_gen6_mask_memory, - .masks = intel_i810_masks, + .masks = intel_gen6_masks, .agp_enable = intel_i810_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_i965_create_gatt_table, @@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, + .agp_type_to_mask_type = intel_gen6_type_to_mask_type, .chipset_flush = intel_i915_chipset_flush, #ifdef USE_PCI_DMA_API .agp_map_page = intel_agp_map_page, diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 
e0249722d25..f953c96efc8 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data) if (hangcheck_dump_tasks) { printk(KERN_CRIT "Hangcheck: Task state:\n"); #ifdef CONFIG_MAGIC_SYSRQ - handle_sysrq('t', NULL); + handle_sysrq('t'); #endif /* CONFIG_MAGIC_SYSRQ */ } if (hangcheck_reboot) { diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index fa27d1676ee..3afd62e856e 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp) if (sysrq_pressed) continue; } else if (sysrq_pressed) { - handle_sysrq(buf[i], tty); + handle_sysrq(buf[i]); sysrq_pressed = 0; continue; } diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 1f4b6de65a2..a2bc885ce60 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c @@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len) hp->sysrq = 1; continue; } else if (hp->sysrq) { - handle_sysrq(c, hp->tty); + handle_sysrq(c); hp->sysrq = 0; continue; } diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 1acdb250951..a3f5e381e74 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np) static int n2rng_data_read(struct hwrng *rng, u32 *data) { - struct n2rng *np = rng->priv; + struct n2rng *np = (struct n2rng *) rng->priv; unsigned long ra = __pa(&np->test_data); int len; diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index 07f3ea38b58..d4b71e8d0d2 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c @@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile ) /* disable DSS reporting */ i2QueueCommands(PTYPE_INLINE, pCh, 100, 4, CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP); - if ( !tty || (tty->termios->c_cflag & HUPCL) ) { + if (tty->termios->c_cflag & HUPCL) { i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN); pCh->dataSetOut &= ~(I2_DTR | I2_RTS); i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25)); @@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg ) if ( pCh ) { rc = copy_to_user(argp, pCh, sizeof(i2ChanStr)); + if (rc) + rc = -EFAULT; } else { rc = -ENODEV; } diff --git a/drivers/char/pty.c b/drivers/char/pty.c index ad46eae1f9b..c350d01716b 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp) } set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ - filp->private_data = tty; - file_move(filp, &tty->tty_files); + + tty_add_file(tty, filp); retval = devpts_pty_new(inode, tty->link); if (retval) diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 79c3bc69165..7c79d243acc 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c @@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info, } info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); configure_r_port(tty, info, NULL); + mutex_unlock(&info->port.mutex); return 0; } diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index fef80cfcab5..e63b830c86c 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp) if (info->port.count == 1) { /* 1st open on this device, init hardware */ 
retval = startup(info); - if (retval < 0) + if (retval < 0) { + mutex_unlock(&info->port.mutex); goto cleanup; + } } mutex_unlock(&info->port.mutex); retval = block_til_ready(tty, filp, info); diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 878ac0c2cc6..ef31bb81e84 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/fs.h> -#include <linux/tty.h> #include <linux/mount.h> #include <linux/kdev_t.h> #include <linux/major.h> @@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str) __setup("sysrq_always_enabled", sysrq_always_enabled_setup); -static void sysrq_handle_loglevel(int key, struct tty_struct *tty) +static void sysrq_handle_loglevel(int key) { int i; @@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = { }; #ifdef CONFIG_VT -static void sysrq_handle_SAK(int key, struct tty_struct *tty) +static void sysrq_handle_SAK(int key) { struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; schedule_work(SAK_work); @@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = { #endif #ifdef CONFIG_VT -static void sysrq_handle_unraw(int key, struct tty_struct *tty) +static void sysrq_handle_unraw(int key) { struct kbd_struct *kbd = &kbd_table[fg_console]; @@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = { #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL) #endif /* CONFIG_VT */ -static void sysrq_handle_crash(int key, struct tty_struct *tty) +static void sysrq_handle_crash(int key) { char *killer = NULL; @@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; -static void sysrq_handle_reboot(int key, struct tty_struct *tty) +static void sysrq_handle_reboot(int key) { lockdep_off(); local_irq_enable(); @@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = { .enable_mask = SYSRQ_ENABLE_BOOT, }; -static void sysrq_handle_sync(int key, struct tty_struct *tty) +static void sysrq_handle_sync(int key) { emergency_sync(); } @@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = { .enable_mask = SYSRQ_ENABLE_SYNC, }; -static void sysrq_handle_show_timers(int key, struct tty_struct *tty) +static void sysrq_handle_show_timers(int key) { sysrq_timer_list_show(); } @@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = { .action_msg = "Show clockevent devices & pending hrtimers (no others)", }; -static void sysrq_handle_mountro(int key, struct tty_struct *tty) +static void sysrq_handle_mountro(int key) { emergency_remount(); } @@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = { }; #ifdef CONFIG_LOCKDEP -static void sysrq_handle_showlocks(int key, struct tty_struct *tty) +static void sysrq_handle_showlocks(int key) { debug_show_all_locks(); } @@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy) static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); -static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) +static void sysrq_handle_showallcpus(int key) { /* * Fall back to the workqueue based printing if the @@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = { }; #endif -static void sysrq_handle_showregs(int key, struct tty_struct *tty) +static void sysrq_handle_showregs(int key) { struct pt_regs *regs = get_irq_regs(); if (regs) @@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; -static void sysrq_handle_showstate(int key, struct 
tty_struct *tty) +static void sysrq_handle_showstate(int key) { show_state(); } @@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = { .enable_mask = SYSRQ_ENABLE_DUMP, }; -static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty) +static void sysrq_handle_showstate_blocked(int key) { show_state_filter(TASK_UNINTERRUPTIBLE); } @@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = { #ifdef CONFIG_TRACING #include <linux/ftrace.h> -static void sysrq_ftrace_dump(int key, struct tty_struct *tty) +static void sysrq_ftrace_dump(int key) { ftrace_dump(DUMP_ALL); } @@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = { #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL) #endif -static void sysrq_handle_showmem(int key, struct tty_struct *tty) +static void sysrq_handle_showmem(int key) { show_mem(); } @@ -330,7 +329,7 @@ static void send_sig_all(int sig) } } -static void sysrq_handle_term(int key, struct tty_struct *tty) +static void sysrq_handle_term(int key) { send_sig_all(SIGTERM); console_loglevel = 8; @@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored) static DECLARE_WORK(moom_work, moom_callback); -static void sysrq_handle_moom(int key, struct tty_struct *tty) +static void sysrq_handle_moom(int key) { schedule_work(&moom_work); } @@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = { }; #ifdef CONFIG_BLOCK -static void sysrq_handle_thaw(int key, struct tty_struct *tty) +static void sysrq_handle_thaw(int key) { emergency_thaw_all(); } @@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = { }; #endif -static void sysrq_handle_kill(int key, struct tty_struct *tty) +static void sysrq_handle_kill(int key) { send_sig_all(SIGKILL); console_loglevel = 8; @@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = { .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -static void sysrq_handle_unrt(int key, struct tty_struct *tty) +static void sysrq_handle_unrt(int key) { normalize_rt_tasks(); } @@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p) sysrq_key_table[i] = op_p; } -void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) +void __handle_sysrq(int key, bool check_mask) { struct sysrq_key_op *op_p; int orig_log_level; @@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { printk("%s\n", op_p->action_msg); console_loglevel = orig_log_level; - op_p->handler(key, tty); + op_p->handler(key); } else { printk("This sysrq operation is disabled.\n"); } @@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) spin_unlock_irqrestore(&sysrq_key_table_lock, flags); } -void handle_sysrq(int key, struct tty_struct *tty) +void handle_sysrq(int key) { if (sysrq_on()) - __handle_sysrq(key, tty, 1); + __handle_sysrq(key, true); } EXPORT_SYMBOL(handle_sysrq); @@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type, default: if (sysrq_down && value && value != 2) - __handle_sysrq(sysrq_xlate[code], NULL, 1); + __handle_sysrq(sysrq_xlate[code], true); break; } @@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, if (get_user(c, buf)) return -EFAULT; - __handle_sysrq(c, NULL, 0); + __handle_sysrq(c, false); } return count; diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 0350c42375a..613c852ee0f 100644 --- a/drivers/char/tty_io.c +++ 
b/drivers/char/tty_io.c @@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ DEFINE_MUTEX(tty_mutex); EXPORT_SYMBOL(tty_mutex); +/* Spinlock to protect the tty->tty_files list */ +DEFINE_SPINLOCK(tty_files_lock); + static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); ssize_t redirected_tty_write(struct file *, const char __user *, @@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty) kfree(tty); } +static inline struct tty_struct *file_tty(struct file *file) +{ + return ((struct tty_file_private *)file->private_data)->tty; +} + +/* Associate a new file with the tty structure */ +void tty_add_file(struct tty_struct *tty, struct file *file) +{ + struct tty_file_private *priv; + + /* XXX: must implement proper error handling in callers */ + priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL); + + priv->tty = tty; + priv->file = file; + file->private_data = priv; + + spin_lock(&tty_files_lock); + list_add(&priv->list, &tty->tty_files); + spin_unlock(&tty_files_lock); +} + +/* Delete file from its tty */ +void tty_del_file(struct file *file) +{ + struct tty_file_private *priv = file->private_data; + + spin_lock(&tty_files_lock); + list_del(&priv->list); + spin_unlock(&tty_files_lock); + file->private_data = NULL; + kfree(priv); +} + + #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) /** @@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) struct list_head *p; int count = 0; - file_list_lock(); + spin_lock(&tty_files_lock); list_for_each(p, &tty->tty_files) { count++; } - file_list_unlock(); + spin_unlock(&tty_files_lock); if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_SLAVE && tty->link && tty->link->count) @@ -317,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) if (*stp == '\0') stp = NULL; - if (tty_line >= 0 && tty_line <= p->num && p->ops && + if (tty_line >= 0 && tty_line < p->num && p->ops && p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { res = tty_driver_kref_get(p); *line = tty_line; @@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty) struct file *cons_filp = NULL; struct file *filp, *f = NULL; struct task_struct *p; + struct tty_file_private *priv; int closecount = 0, n; unsigned long flags; int refs = 0; @@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty) spin_lock(&redirect_lock); - if (redirect && redirect->private_data == tty) { + if (redirect && file_tty(redirect) == tty) { f = redirect; redirect = NULL; } @@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty) workqueue with the lock held */ check_tty_count(tty, "tty_hangup"); - file_list_lock(); + spin_lock(&tty_files_lock); /* This breaks for file handles being sent over AF_UNIX sockets ? 
*/ - list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) { + list_for_each_entry(priv, &tty->tty_files, list) { + filp = priv->file; if (filp->f_op->write == redirected_tty_write) cons_filp = filp; if (filp->f_op->write != tty_write) @@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty) __tty_fasync(-1, filp, 0); /* can't block */ filp->f_op = &hung_up_tty_fops; } - file_list_unlock(); + spin_unlock(&tty_files_lock); tty_ldisc_hangup(tty); @@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int i; - struct tty_struct *tty; - struct inode *inode; + struct inode *inode = file->f_path.dentry->d_inode; + struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; - tty = file->private_data; - inode = file->f_path.dentry->d_inode; if (tty_paranoia_check(tty, inode, "tty_read")) return -EIO; if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) @@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg) static ssize_t tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct tty_struct *tty; struct inode *inode = file->f_path.dentry->d_inode; + struct tty_struct *tty = file_tty(file); + struct tty_ldisc *ld; ssize_t ret; - struct tty_ldisc *ld; - tty = file->private_data; if (tty_paranoia_check(tty, inode, "tty_write")) return -EIO; if (!tty || !tty->ops->write || @@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work) tty_driver_kref_put(driver); module_put(driver->owner); - file_list_lock(); + spin_lock(&tty_files_lock); list_del_init(&tty->tty_files); - file_list_unlock(); + spin_unlock(&tty_files_lock); put_pid(tty->pgrp); put_pid(tty->session); @@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx) int tty_release(struct inode *inode, struct file *filp) { - struct tty_struct *tty, *o_tty; + struct tty_struct *tty = file_tty(filp); + struct tty_struct *o_tty; int pty_master, tty_closing, o_tty_closing, do_sleep; int devpts; int idx; char buf[64]; - tty = filp->private_data; if (tty_paranoia_check(tty, inode, "tty_release_dev")) return 0; @@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp) * - do_tty_hangup no longer sees this file descriptor as * something that needs to be handled for hangups. */ - file_kill(filp); - filp->private_data = NULL; + tty_del_file(filp); /* * Perform some housekeeping before deciding whether to return. 
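To make the tty_io.c refactor above concrete: file->private_data now points at a small struct tty_file_private installed by tty_add_file(), file_tty() fetches the tty from it, and the per-tty file list is walked under the new tty_files_lock spinlock instead of the old global file list lock. A hedged sketch of the resulting access pattern, assuming it lives in tty_io.c next to the helpers defined above, with example_fop() standing in for an operation such as tty_read():

static ssize_t example_fop(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	/* was: struct tty_struct *tty = file->private_data; */
	struct tty_struct *tty = file_tty(file);

	if (tty_paranoia_check(tty, file->f_path.dentry->d_inode, "example_fop"))
		return -EIO;

	/*
	 * Use tty as before.  The tty_file_private node added by
	 * tty_add_file() keeps this file on tty->tty_files (under
	 * tty_files_lock) until tty_del_file() unhooks it at release time.
	 */
	return 0;
}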
@@ -1839,8 +1875,8 @@ got_driver: return PTR_ERR(tty); } - filp->private_data = tty; - file_move(filp, &tty->tty_files); + tty_add_file(tty, filp); + check_tty_count(tty, "tty_open"); if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) @@ -1916,11 +1952,10 @@ got_driver: static unsigned int tty_poll(struct file *filp, poll_table *wait) { - struct tty_struct *tty; + struct tty_struct *tty = file_tty(filp); struct tty_ldisc *ld; int ret = 0; - tty = filp->private_data; if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) return 0; @@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait) static int __tty_fasync(int fd, struct file *filp, int on) { - struct tty_struct *tty; + struct tty_struct *tty = file_tty(filp); unsigned long flags; int retval = 0; - tty = filp->private_data; if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) goto out; @@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty); */ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct tty_struct *tty, *real_tty; + struct tty_struct *tty = file_tty(file); + struct tty_struct *real_tty; void __user *p = (void __user *)arg; int retval; struct tty_ldisc *ld; struct inode *inode = file->f_dentry->d_inode; - tty = file->private_data; if (tty_paranoia_check(tty, inode, "tty_ioctl")) return -EINVAL; @@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_dentry->d_inode; - struct tty_struct *tty = file->private_data; + struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; int retval = -ENOIOCTLCMD; @@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty) if (!filp) continue; if (filp->f_op->read == tty_read && - filp->private_data == tty) { + file_tty(filp) == tty) { printk(KERN_NOTICE "SAK: killed process %d" " (%s): fd#%d opened to the tty\n", task_pid_nr(p), p->comm, i); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index c734f9b1263..281aada7b4a 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -194,10 +194,11 @@ static DECLARE_WORK(console_work, console_callback); int fg_console; int last_console; int want_console = -1; -int saved_fg_console; -int saved_last_console; -int saved_want_console; -int saved_vc_mode; +static int saved_fg_console; +static int saved_last_console; +static int saved_want_console; +static int saved_vc_mode; +static int saved_console_blanked; /* * For each existing display, we have a pointer to console currently visible @@ -905,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, * bottom of buffer */ old_origin += (old_rows - new_rows) * old_row_size; - end = vc->vc_scr_end; } else { /* * Cursor is in no man's land, copy 1/2 screenful * from the top and bottom of cursor position */ old_origin += (vc->vc_y - new_rows/2) * old_row_size; - end = old_origin + (old_row_size * new_rows); } - } else - /* - * Cursor near the top, copy contents from the top of buffer - */ - end = (old_rows > new_rows) ? 
old_origin + - (old_row_size * new_rows) : - vc->vc_scr_end; + } + + end = old_origin + old_row_size * min(old_rows, new_rows); update_attr(vc); @@ -3074,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, old_was_color = vc->vc_can_do_color; vc->vc_sw->con_deinit(vc); - if (!vc->vc_origin) - vc->vc_origin = (unsigned long)vc->vc_screenbuf; + vc->vc_origin = (unsigned long)vc->vc_screenbuf; visual_init(vc, i, 0); set_origin(vc); update_attr(vc); @@ -3449,6 +3443,7 @@ int con_debug_enter(struct vc_data *vc) saved_last_console = last_console; saved_want_console = want_console; saved_vc_mode = vc->vc_mode; + saved_console_blanked = console_blanked; vc->vc_mode = KD_TEXT; console_blanked = 0; if (vc->vc_sw->con_debug_enter) @@ -3492,6 +3487,7 @@ int con_debug_leave(void) fg_console = saved_fg_console; last_console = saved_last_console; want_console = saved_want_console; + console_blanked = saved_console_blanked; vc_cons[fg_console].d->vc_mode = saved_vc_mode; vc = vc_cons[fg_console].d; diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 0ed763cd2e7..b663d573aad 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -94,6 +94,7 @@ #ifdef CONFIG_OF /* For open firmware. */ +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 670239ab751..e7d5d6b5dcf 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, amd64_handle_ce(mci, info); else if (ecc_type == 1) amd64_handle_ue(mci, info); - - /* - * If main error is CE then overflow must be CE. If main error is UE - * then overflow is unknown. We'll call the overflow a CE - if - * panic_on_ue is set then we're already panic'ed and won't arrive - * here. Else, then apparently someone doesn't think that UE's are - * catastrophic. - */ - if (info->nbsh & K8_NBSH_OVERFLOW) - edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow"); } void amd64_decode_bus_error(int node_id, struct err_regs *regs) diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index bae9351e947..9014df6f605 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c @@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val, pr_emerg("MC%d_STATUS: ", m->bank); - pr_cont("%sorrected error, report: %s, MiscV: %svalid, " + pr_cont("%sorrected error, other errors lost: %s, " "CPU context corrupt: %s", ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), - ((m->status & MCI_STATUS_EN) ? "yes" : "no"), - ((m->status & MCI_STATUS_MISCV) ? "" : "in"), + ((m->status & MCI_STATUS_OVER) ? "yes" : "no"), ((m->status & MCI_STATUS_PCC) ? 
"yes" : "no")); /* do the two bits[14:13] together */ @@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = { static int __init mce_amd_init(void) { /* - * We can decode MCEs for Opteron and later CPUs: + * We can decode MCEs for K8, F10h and F11h CPUs: */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && - (boot_cpu_data.x86 >= 0xf)) - atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return 0; + + if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) + return 0; + + atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); return 0; } diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index ca7ca56661e..b42a0bde849 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction, spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t == transaction) { + if (!del_timer(&t->split_timeout_timer)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; @@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction, spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { - del_timer_sync(&t->split_timeout_timer); t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } + timed_out: return -ENOENT; } @@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t->node_id == source && t->tlabel == tlabel) { + if (!del_timer(&t->split_timeout_timer)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; @@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) spin_unlock_irqrestore(&card->lock, flags); if (&t->link == &card->transaction_list) { + timed_out: fw_notify("Unsolicited response (source %x, tlabel %x)\n", source, tlabel); return; @@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } - del_timer_sync(&t->split_timeout_timer); - /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index da17d409a24..33f8421c71c 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -579,7 +579,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, if (!peer) { fw_notify("No peer for ARP packet from %016llx\n", (unsigned long long)peer_guid); - goto failed_proto; + goto no_peer; } /* @@ -656,7 +656,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, return 0; - failed_proto: + no_peer: net->stats.rx_errors++; net->stats.rx_dropped++; @@ -664,7 +664,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, if (netif_queue_stopped(net)) netif_wake_queue(net); - return 0; + return -ENOENT; } static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, @@ -701,7 +701,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, fw_error("out of memory\n"); net->stats.rx_dropped++; - return -1; + return -ENOMEM; } skb_reserve(skb, (net->hard_header_len + 15) & ~15); memcpy(skb_put(skb, len), buf, len); @@ -726,8 +726,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, spin_lock_irqsave(&dev->lock, flags); peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); - if (!peer) - goto bad_proto; + if (!peer) { + retval = -ENOENT; + goto fail; + } pd = fwnet_pd_find(peer, datagram_label); if (pd == NULL) { @@ -741,7 +743,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, dg_size, buf, fg_off, len); if (pd == NULL) { retval = -ENOMEM; - goto bad_proto; + goto fail; } peer->pdg_size++; } else { @@ -755,9 +757,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, pd = fwnet_pd_new(net, peer, datagram_label, dg_size, buf, fg_off, len); if (pd == NULL) { - retval = -ENOMEM; peer->pdg_size--; - goto bad_proto; + retval = -ENOMEM; + goto fail; } } else { if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { @@ -768,7 +770,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, */ fwnet_pd_delete(pd); peer->pdg_size--; - goto bad_proto; + retval = -ENOMEM; + goto fail; } } } /* new datagram or add to existing one */ @@ -794,14 +797,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, spin_unlock_irqrestore(&dev->lock, flags); return 0; - - bad_proto: + fail: spin_unlock_irqrestore(&dev->lock, flags); if (netif_queue_stopped(net)) netif_wake_queue(net); - return 0; + return retval; } static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7f03540cabe..be29b0bb247 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -694,7 +694,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) log_ar_at_event('R', p.speed, p.header, evt); /* - * The OHCI bus reset handler synthesizes a phy packet with + * Several controllers, notably from NEC and VIA, forget to + * write ack_complete status at PHY packet reception. + */ + if (evt == OHCI1394_evt_no_status && + (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4)) + p.ack = ACK_COMPLETE; + + /* + * The OHCI bus reset handler synthesizes a PHY packet with * the new generation number when a bus reset happens (see * section 8.4.2.3). 
This helps us determine when a request * was received and make sure we send the response in the same diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 9f76171717e..bfae4b30979 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, if (&orb->link != &lu->orb_list) { orb->callback(orb, &status); - kref_put(&orb->kref, free_orb); + kref_put(&orb->kref, free_orb); /* orb callback reference */ } else { fw_error("status write for unknown orb\n"); } @@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode, * So this callback only sets the rcode if it hasn't already * been set and only does the cleanup if the transaction * failed and we didn't already get a status write. + * + * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some + * OXUF936QSE firmwares occasionally respond after Split_Timeout and + * complete the ORB just fine. Note, we also get RCODE_CANCELLED + * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0. */ spin_lock_irqsave(&card->lock, flags); if (orb->rcode == -1) orb->rcode = rcode; - if (orb->rcode != RCODE_COMPLETE) { + + if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) { list_del(&orb->link); spin_unlock_irqrestore(&card->lock, flags); + orb->callback(orb, NULL); + kref_put(&orb->kref, free_orb); /* orb callback reference */ } else { spin_unlock_irqrestore(&card->lock, flags); } - kref_put(&orb->kref, free_orb); + kref_put(&orb->kref, free_orb); /* transaction callback reference */ } static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, @@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, list_add_tail(&orb->link, &lu->orb_list); spin_unlock_irqrestore(&device->card->lock, flags); - /* Take a ref for the orb list and for the transaction callback. */ - kref_get(&orb->kref); - kref_get(&orb->kref); + kref_get(&orb->kref); /* transaction callback reference */ + kref_get(&orb->kref); /* orb callback reference */ fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, node_id, generation, device->max_speed, offset, @@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) list_for_each_entry_safe(orb, next, &list, link) { retval = 0; - if (fw_cancel_transaction(device->card, &orb->t) == 0) - continue; + fw_cancel_transaction(device->card, &orb->t); orb->rcode = RCODE_CANCELLED; orb->callback(orb, NULL); + kref_put(&orb->kref, free_orb); /* orb callback reference */ } return retval; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index a6c670b8ce5..280c9b5ad9e 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -122,18 +122,10 @@ config ISCSI_IBFT_FIND is necessary for iSCSI Boot Firmware Table Attributes module to work properly. -config ISCSI_BOOT_SYSFS - tristate "iSCSI Boot Sysfs Interface" - default n - help - This option enables support for exposing iSCSI boot information - via sysfs to userspace. If you wish to export this information, - say Y. Otherwise, say N. 
- config ISCSI_IBFT tristate "iSCSI Boot Firmware Table Attributes module" select ISCSI_BOOT_SYSFS - depends on ISCSI_IBFT_FIND + depends on ISCSI_IBFT_FIND && SCSI default n help This option enables support for detection and exposing of iSCSI diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 5fe7e166292..1c3c17343db 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -10,5 +10,4 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o obj-$(CONFIG_DMIID) += dmi-id.o obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o -obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 4f04ec0410a..6148a1c6789 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -727,8 +727,10 @@ static void ibft_unregister(void) static void ibft_cleanup(void) { - ibft_unregister(); - iscsi_boot_destroy_kset(boot_kset); + if (boot_kset) { + ibft_unregister(); + iscsi_boot_destroy_kset(boot_kset); + } } static void __exit ibft_exit(void) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7e31d434834..d2ab01e90a9 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -34,6 +34,9 @@ #include "drm_crtc_helper.h" #include "drm_fb_helper.h" +static bool drm_kms_helper_poll = true; +module_param_named(poll, drm_kms_helper_poll, bool, 0600); + static void drm_mode_validate_flag(struct drm_connector *connector, int flags) { @@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->status = connector_status_disconnected; if (connector->funcs->force) connector->funcs->force(connector); - } else + } else { connector->status = connector->funcs->detect(connector); + drm_helper_hpd_irq_event(dev); + } if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", @@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, } count = (*connector_funcs->get_modes)(connector); - if (!count) { + if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); - if (!count) - return 0; - } + if (count == 0) + goto prune; drm_mode_connector_list_update(connector); @@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work) enum drm_connector_status old_status, status; bool repoll = false, changed = false; + if (!drm_kms_helper_poll) + return; + mutex_lock(&dev->mode_config.mutex); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) bool poll = false; struct drm_connector *connector; + if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) + return; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->polled) poll = true; @@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) { if (!dev->mode_config.poll_enabled) return; + /* kill timer and schedule immediate execution, this doesn't block */ cancel_delayed_work(&dev->mode_config.output_poll_work); - queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); + if (drm_kms_helper_poll) + queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); } EXPORT_SYMBOL(drm_helper_hpd_irq_event); diff --git 
a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 90288ec7c28..84da748555b 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -55,6 +55,9 @@ static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); +#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} + /** Ioctl table */ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), @@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp, int retcode = -EINVAL; char stack_kdata[128]; char *kdata = NULL; + unsigned int usize, asize; dev = file_priv->minor->dev; atomic_inc(&dev->ioctl_count); @@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp, ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && - (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) + (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { + u32 drv_size; ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; + drv_size = _IOC_SIZE(ioctl->cmd_drv); + usize = asize = _IOC_SIZE(cmd); + if (drv_size > asize) + asize = drv_size; + } else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ioctl = &drm_ioctls[nr]; cmd = ioctl->cmd; + usize = asize = _IOC_SIZE(cmd); } else goto err_i1; @@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp, retcode = -EACCES; } else { if (cmd & (IOC_IN | IOC_OUT)) { - if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) { + if (asize <= sizeof(stack_kdata)) { kdata = stack_kdata; } else { - kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + kdata = kmalloc(asize, GFP_KERNEL); if (!kdata) { retcode = -ENOMEM; goto err_i1; @@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp, if (cmd & IOC_IN) { if (copy_from_user(kdata, (void __user *)arg, - _IOC_SIZE(cmd)) != 0) { + usize) != 0) { retcode = -EFAULT; goto err_i1; } - } + } else + memset(kdata, 0, usize); + if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, kdata, file_priv); else { @@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp, if (cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, - _IOC_SIZE(cmd)) != 0) + usize) != 0) retcode = -EFAULT; } } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index de82e201d68..6a5e403f9aa 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn int i; enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; struct drm_fb_helper_cmdline_mode *cmdline_mode; - struct drm_connector *connector = fb_helper_conn->connector; + struct drm_connector *connector; if (!fb_helper_conn) return false; + connector = fb_helper_conn->connector; cmdline_mode = &fb_helper_conn->cmdline_mode; if (!mode_option) @@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) } static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); -static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3) +static void drm_fb_helper_sysrq(int dummy1) { schedule_work(&drm_fb_helper_restore_work); } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 3a652a65546..b744dad5c23 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -41,6 +41,7 @@ /* from BKL pushdown: note that nothing else serializes idr_find() */ DEFINE_MUTEX(drm_global_mutex); +EXPORT_SYMBOL(drm_global_mutex); static int 
drm_open_helper(struct inode *inode, struct file *filp, struct drm_device * dev); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index e2f70a516c3..9bf93bc9a32 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) } /* Contention */ + mutex_unlock(&drm_global_mutex); schedule(); + mutex_lock(&drm_global_mutex); if (signal_pending(current)) { ret = -EINTR; break; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index da99edc5088..a6bfc302ed9 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur) EXPORT_SYMBOL(drm_mm_put_block); -static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, - unsigned alignment) +static int check_free_hole(unsigned long start, unsigned long end, + unsigned long size, unsigned alignment) { unsigned wasted = 0; - if (entry->size < size) + if (end - start < size) return 0; if (alignment) { - register unsigned tmp = entry->start % alignment; + unsigned tmp = start % alignment; if (tmp) wasted = alignment - tmp; } - if (entry->size >= size + wasted) { + if (end >= start + size + wasted) { return 1; } @@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, best_size = ~0UL; list_for_each_entry(entry, &mm->free_stack, free_stack) { - if (!check_free_mm_node(entry, size, alignment)) + if (!check_free_hole(entry->start, entry->start + entry->size, + size, alignment)) continue; if (!best_match) @@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, best_size = ~0UL; list_for_each_entry(entry, &mm->free_stack, free_stack) { - if (entry->start > end || (entry->start+entry->size) < start) - continue; + unsigned long adj_start = entry->start < start ? + start : entry->start; + unsigned long adj_end = entry->start + entry->size > end ? + end : entry->start + entry->size; - if (!check_free_mm_node(entry, size, alignment)) + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; if (!best_match) @@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) node->free_stack.prev = prev_free; node->free_stack.next = next_free; - if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { + if (check_free_hole(node->start, node->start + node->size, + mm->scan_size, mm->scan_alignment)) { mm->scan_hit_start = node->start; mm->scan_hit_size = node->size; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index f1f473ea97d..949326d2a8e 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; /* Fill in HSync values */ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; - drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; + drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC; + /* Fill in VSync values */ + drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH; + drm_mode->vsync_end = drm_mode->vsync_start + vsync; } /* 15/13. 
Find pixel clock frequency (kHz for xf86) */ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 3778360ecee..fda67468e60 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) break; } - if (!agpmem) + if (&agpmem->head == &dev->agp->memory) goto vm_fault_error; /* diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 0e6c131313d..61b4caf220f 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } struct drm_ioctl_desc i810_ioctls[] = { - DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), }; int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 5168862c922..671aa18415a 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } struct drm_ioctl_desc i830_ioctls[] = { - DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_VERTEX, 
i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), }; int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index da78f2c0d90..5c8e53458ed 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_suspend.o \ i915_gem.o \ i915_gem_debug.o \ + i915_gem_evict.o \ i915_gem_tiling.o \ i915_trace_points.o \ intel_display.o \ @@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_hdmi.o \ intel_sdvo.o \ intel_modes.o \ + intel_panel.o \ intel_i2c.o \ intel_fb.o \ intel_tv.o \ diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 0d6ff640e1c..8c2ad014c47 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -30,20 +30,17 @@ #include "intel_drv.h" struct intel_dvo_device { - char *name; + const char *name; int type; /* DVOA/B/C output register */ u32 dvo_reg; /* GPIO register used for i2c bus to control this device */ u32 gpio; int slave_addr; - struct i2c_adapter *i2c_bus; const struct intel_dvo_dev_ops *dev_ops; void *dev_priv; - - struct drm_display_mode *panel_fixed_mode; - bool panel_wants_dither; + struct i2c_adapter *i2c_bus; }; struct intel_dvo_dev_ops { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9214119c015..5e43d707678 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -31,6 +31,7 @@ #include <linux/slab.h> #include "drmP.h" #include "drm.h" +#include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" @@ 
-121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } +static int i915_gem_pageflip_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + unsigned long flags; + struct intel_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + const char *pipe = crtc->pipe ? "B" : "A"; + const char *plane = crtc->plane ? "B" : "A"; + struct intel_unpin_work *work; + + spin_lock_irqsave(&dev->event_lock, flags); + work = crtc->unpin_work; + if (work == NULL) { + seq_printf(m, "No flip due on pipe %s (plane %s)\n", + pipe, plane); + } else { + if (!work->pending) { + seq_printf(m, "Flip queued on pipe %s (plane %s)\n", + pipe, plane); + } else { + seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n", + pipe, plane); + } + if (work->enable_stall_check) + seq_printf(m, "Stall check enabled, "); + else + seq_printf(m, "Stall check waiting for page flip ioctl, "); + seq_printf(m, "%d prepares\n", work->pending); + + if (work->old_fb_obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); + if(obj_priv) + seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + } + if (work->pending_flip_obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); + if(obj_priv) + seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + } + } + spin_unlock_irqrestore(&dev->event_lock, flags); + } + + return 0; +} + static int i915_gem_request_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -467,6 +516,9 @@ static int i915_error_state(struct seq_file *m, void *unused) } } + if (error->overlay) + intel_overlay_print_error_state(m, error->overlay); + out: spin_unlock_irqrestore(&dev_priv->error_lock, flags); @@ -774,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f19ffe87af3..9d67b485303 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -499,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, } } + + if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + BEGIN_LP_RING(2); + OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + } i915_emit_breadcrumb(dev); return 0; @@ -613,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, ret = copy_from_user(cliprects, batch->cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect)); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_free; + } } mutex_lock(&dev->struct_mutex); @@ -655,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, return -ENOMEM; ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_batch_free; + } if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, @@ -669,8 +680,10 @@ 
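A note on the batchbuffer/cmdbuffer fixes above: copy_from_user() returns the number of bytes it failed to copy, not an error code, so its result must not be handed straight back to userspace. The pattern the patch settles on, shown here as a minimal fragment using the names from the hunk, converts any short copy into -EFAULT:

        /* Sketch of the error-translation pattern used in the hunks above. */
        if (copy_from_user(cliprects, batch->cliprects,
                           batch->num_cliprects * sizeof(struct drm_clip_rect)) != 0) {
                ret = -EFAULT;          /* never return the raw byte count */
                goto fail_free;
        }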
static int i915_cmdbuffer(struct drm_device *dev, void *data, ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect)); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_clip_free; + } } mutex_lock(&dev->struct_mutex); @@ -878,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; - int ret = 0; + int ret; if (IS_I965G(dev)) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); @@ -888,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev) /* If ACPI doesn't have it, assume we need to allocate it ourselves */ #ifdef CONFIG_PNP if (mchbar_addr && - pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { - ret = 0; - goto out; - } + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; #endif /* Get some space for it */ - ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, MCHBAR_SIZE, MCHBAR_SIZE, PCIBIOS_MIN_MEM, - 0, pcibios_align_resource, + 0, pcibios_align_resource, dev_priv->bridge_dev); if (ret) { DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); dev_priv->mch_res.start = 0; - goto out; + return ret; } if (IS_I965G(dev)) @@ -912,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) pci_write_config_dword(dev_priv->bridge_dev, reg, lower_32_bits(dev_priv->mch_res.start)); -out: - return ret; + return 0; } /* Setup MCHBAR if possible, return true if we should disable it again */ @@ -2075,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + dev_priv->regs = ioremap(base, size); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); @@ -2360,46 +2377,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) } struct drm_ioctl_desc i915_ioctls[] = { - DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), - DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), - DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), - DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, 
DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5044f653e8e..216deb57978 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,91 +61,86 @@ extern int intel_agp_enabled; .driver_data = (unsigned long) info } static const struct intel_device_info intel_i830_info = { - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { - .is_i8xx = 1, + .gen = 2, .is_i8xx = 1, }; static const struct intel_device_info intel_i85x_info = { - .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { - .is_i8xx = 1, + .gen = 2, .is_i8xx = 1, }; static const struct intel_device_info intel_i915g_info = { - .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { - .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_i9xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_i945g_info = { - .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { - .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, }; static const struct intel_device_info intel_i965g_info = { - .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, + .gen = 4, .is_broadwater = 1, .is_i965g = 1, 
.is_i9xx = 1, + .has_hotplug = 1, }; static const struct intel_device_info intel_i965gm_info = { - .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, - .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, - .has_hotplug = 1, + .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, + .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_g33_info = { - .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, + .gen = 3, .is_g33 = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_g45_info = { - .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, + .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_gm45_info = { - .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, + .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_pineview_info = { - .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, - .need_gfx_hws = 1, - .has_hotplug = 1, + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_ironlake_d_info = { - .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_pipe_cxsr = 1, - .has_hotplug = 1, + .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_ironlake_m_info = { - .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, - .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, - .has_hotplug = 1, + .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { - .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, .is_gen6 = 1, + .gen = 6, .is_i965g = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { - .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, - .has_hotplug = 1, .is_gen6 = 1, + .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_hotplug = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ @@ -180,7 +175,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), {0, 0, 0} }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 906663b9929..af4a263cf25 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -113,6 +113,9 @@ struct intel_opregion { int enabled; }; +struct intel_overlay; +struct intel_overlay_error_state; + struct 
drm_i915_master_private { drm_local_map_t *sarea; struct _drm_i915_sarea *sarea_priv; @@ -166,6 +169,7 @@ struct drm_i915_error_state { u32 purgeable:1; } *active_bo; u32 active_bo_count; + struct intel_overlay_error_state *overlay; }; struct drm_i915_display_funcs { @@ -186,9 +190,8 @@ struct drm_i915_display_funcs { /* clock gating init */ }; -struct intel_overlay; - struct intel_device_info { + u8 gen; u8 is_mobile : 1; u8 is_i8xx : 1; u8 is_i85x : 1; @@ -204,7 +207,6 @@ struct intel_device_info { u8 is_broadwater : 1; u8 is_crestline : 1; u8 is_ironlake : 1; - u8 is_gen6 : 1; u8 has_fbc : 1; u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; @@ -242,6 +244,7 @@ typedef struct drm_i915_private { struct pci_dev *bridge_dev; struct intel_ring_buffer render_ring; struct intel_ring_buffer bsd_ring; + uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; void *seqno_page; @@ -251,6 +254,7 @@ typedef struct drm_i915_private { drm_local_map_t hws_map; struct drm_gem_object *seqno_obj; struct drm_gem_object *pwrctx; + struct drm_gem_object *renderctx; struct resource mch_res; @@ -285,6 +289,9 @@ typedef struct drm_i915_private { unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; int num_pipe; + u32 flush_rings; +#define FLUSH_RENDER_RING 0x1 +#define FLUSH_BSD_RING 0x2 /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ @@ -568,8 +575,6 @@ typedef struct drm_i915_private { */ struct delayed_work retire_work; - uint32_t next_gem_seqno; - /** * Waiting sequence number, if any */ @@ -610,6 +615,8 @@ typedef struct drm_i915_private { struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ unsigned int lvds_border_bits; + /* Panel fitter placement and size for Ironlake+ */ + u32 pch_pf_pos, pch_pf_size; struct drm_crtc *plane_to_crtc_mapping[2]; struct drm_crtc *pipe_to_crtc_mapping[2]; @@ -669,6 +676,8 @@ struct drm_i915_gem_object { struct list_head list; /** This object's place on GPU write list */ struct list_head gpu_write_list; + /** This object's place on eviction list */ + struct list_head evict_list; /** * This is set if the object is on the active or flushing lists @@ -978,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end); +int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, @@ -991,7 +1001,9 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, int id); + struct drm_gem_object *obj, + int id, + int align); void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); @@ -1003,6 +1015,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); +/* i915_gem_evict.c */ +int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); +int i915_gem_evict_everything(struct drm_device *dev); +int i915_gem_evict_inactive(struct drm_device *dev); + /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_object_do_bit_17_swizzle(struct 
drm_gem_object *obj); @@ -1066,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); +/* overlay */ +extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + /** * Lock test for when it's just for synchronization of ring access. * @@ -1092,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define I915_VERBOSE 0 #define BEGIN_LP_RING(n) do { \ - drm_i915_private_t *dev_priv = dev->dev_private; \ + drm_i915_private_t *dev_priv__ = dev->dev_private; \ if (I915_VERBOSE) \ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ - intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ + intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ } while (0) #define OUT_RING(x) do { \ - drm_i915_private_t *dev_priv = dev->dev_private; \ + drm_i915_private_t *dev_priv__ = dev->dev_private; \ if (I915_VERBOSE) \ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ - intel_ring_emit(dev, &dev_priv->render_ring, x); \ + intel_ring_emit(dev, &dev_priv__->render_ring, x); \ } while (0) #define ADVANCE_LP_RING() do { \ - drm_i915_private_t *dev_priv = dev->dev_private; \ + drm_i915_private_t *dev_priv__ = dev->dev_private; \ if (I915_VERBOSE) \ DRM_DEBUG("ADVANCE_LP_RING %x\n", \ - dev_priv->render_ring.tail); \ - intel_ring_advance(dev, &dev_priv->render_ring); \ + dev_priv__->render_ring.tail); \ + intel_ring_advance(dev, &dev_priv__->render_ring); \ } while(0) /** @@ -1141,7 +1162,6 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define IS_845G(dev) ((dev)->pci_device == 0x2562) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) @@ -1160,27 +1180,13 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) -#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) -#define IS_GEN3(dev) (IS_I915G(dev) || \ - IS_I915GM(dev) || \ - IS_I945G(dev) || \ - IS_I945GM(dev) || \ - IS_G33(dev) || \ - IS_PINEVIEW(dev)) -#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ - (dev)->pci_device == 0x2982 || \ - (dev)->pci_device == 0x2992 || \ - (dev)->pci_device == 0x29A2 || \ - (dev)->pci_device == 0x2A02 || \ - (dev)->pci_device == 0x2A12 || \ - (dev)->pci_device == 0x2E02 || \ - (dev)->pci_device == 0x2E12 || \ - (dev)->pci_device == 0x2E22 || \ - (dev)->pci_device == 0x2E32 || \ - (dev)->pci_device == 0x2A42 || \ - (dev)->pci_device == 0x2E42) +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0758c7802e6..16fca1d1799 100644 --- a/drivers/gpu/drm/i915/i915_gem.c 
+++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,7 +34,9 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/pci.h> +#include <linux/intel-gtt.h> +static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); @@ -48,8 +50,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); -static int i915_gem_evict_something(struct drm_device *dev, int min_size); -static int i915_gem_evict_from_inactive_list(struct drm_device *dev); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv); @@ -58,6 +58,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj); static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); +static inline bool +i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) +{ + return obj_priv->gtt_space && + !obj_priv->active && + obj_priv->pin_count == 0; +} + int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end) { @@ -128,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - drm_gem_object_unreference_unlocked(obj); - if (ret) + if (ret) { + drm_gem_object_unreference_unlocked(obj); return ret; + } - args->handle = handle; + /* Sink the floating reference from kref_init(handlecount) */ + drm_gem_object_handle_unreference_unlocked(obj); + args->handle = handle; return 0; } @@ -313,7 +324,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) if (ret == -ENOMEM) { struct drm_device *dev = obj->dev; - ret = i915_gem_evict_something(dev, obj->size); + ret = i915_gem_evict_something(dev, obj->size, + i915_gem_get_gtt_alignment(obj)); if (ret) return ret; @@ -1036,6 +1048,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } + + /* Maintain LRU order of "inactive" objects */ + if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return ret; @@ -1137,7 +1154,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; struct drm_device *dev = obj->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); pgoff_t page_offset; unsigned long pfn; @@ -1155,8 +1172,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unlock; - list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); - ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret) goto unlock; @@ -1169,6 +1184,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unlock; } + if (i915_gem_object_is_inactive(obj_priv)) + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + page_offset; @@ -1363,7 +1381,6 @@ 
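Among the i915_gem.c changes above, the create ioctl now distinguishes the two references held on a fresh object: the reference from allocation and the floating handle reference taken by kref_init() on the handle count. A condensed sketch of the corrected flow, copied from the shape of the hunk rather than adding anything new:

        /* Sketch: once the userspace handle exists, drop the floating handle
         * reference so the handle becomes the object's owner; on failure drop
         * our reference so the freshly created object is freed. */
        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                return ret;
        }
        drm_gem_object_handle_unreference_unlocked(obj);
        args->handle = handle;
        return 0;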
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_gem_mmap_gtt *args = data; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret; @@ -1409,7 +1426,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return ret; } - list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); } drm_gem_object_unreference(obj); @@ -1493,9 +1509,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct inode *inode; + /* Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. + * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*. Here we mirror the actions taken + * when by shmem_delete_inode() to release the backing store. + */ inode = obj->filp->f_path.dentry->d_inode; - if (inode->i_op->truncate) - inode->i_op->truncate (inode); + truncate_inode_pages(inode->i_mapping, 0); + if (inode->i_op->truncate_range) + inode->i_op->truncate_range(inode, 0, (loff_t)-1); obj_priv->madv = __I915_MADV_PURGED; } @@ -1887,19 +1910,6 @@ i915_gem_flush(struct drm_device *dev, flush_domains); } -static void -i915_gem_flush_ring(struct drm_device *dev, - uint32_t invalidate_domains, - uint32_t flush_domains, - struct intel_ring_buffer *ring) -{ - if (flush_domains & I915_GEM_DOMAIN_CPU) - drm_agp_chipset_flush(dev); - ring->flush(dev, ring, - invalidate_domains, - flush_domains); -} - /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. @@ -1973,8 +1983,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) * cause memory corruption through use-after-free. */ - BUG_ON(obj_priv->active); - /* release the fence reg _after_ flushing */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); @@ -2010,34 +2018,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } -static struct drm_gem_object * -i915_gem_find_inactive_object(struct drm_device *dev, int min_size) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *best = NULL; - struct drm_gem_object *first = NULL; - - /* Try to find the smallest clean object */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - struct drm_gem_object *obj = &obj_priv->base; - if (obj->size >= min_size) { - if ((!obj_priv->dirty || - i915_gem_object_is_purgeable(obj_priv)) && - (!best || obj->size < best->size)) { - best = obj; - if (best->size == min_size) - return best; - } - if (!first) - first = obj; - } - } - - return best ? 
best : first; -} - -static int +int i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -2078,155 +2059,6 @@ i915_gpu_idle(struct drm_device *dev) return ret; } -static int -i915_gem_evict_everything(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; - bool lists_empty; - - spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - - if (lists_empty) - return -ENOSPC; - - /* Flush everything (on to the inactive lists) and evict */ - ret = i915_gpu_idle(dev); - if (ret) - return ret; - - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - - ret = i915_gem_evict_from_inactive_list(dev); - if (ret) - return ret; - - spin_lock(&dev_priv->mm.active_list_lock); - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - BUG_ON(!lists_empty); - - return 0; -} - -static int -i915_gem_evict_something(struct drm_device *dev, int min_size) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - int ret; - - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; - struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; - for (;;) { - i915_gem_retire_requests(dev); - - /* If there's an inactive buffer available now, grab it - * and be done. - */ - obj = i915_gem_find_inactive_object(dev, min_size); - if (obj) { - struct drm_i915_gem_object *obj_priv; - -#if WATCH_LRU - DRM_INFO("%s: evicting %p\n", __func__, obj); -#endif - obj_priv = to_intel_bo(obj); - BUG_ON(obj_priv->pin_count != 0); - BUG_ON(obj_priv->active); - - /* Wait on the rendering and unbind the buffer. */ - return i915_gem_object_unbind(obj); - } - - /* If we didn't get anything, but the ring is still processing - * things, wait for the next to finish and hopefully leave us - * a buffer to evict. - */ - if (!list_empty(&render_ring->request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&render_ring->request_list, - struct drm_i915_gem_request, - list); - - ret = i915_wait_request(dev, - request->seqno, request->ring); - if (ret) - return ret; - - continue; - } - - if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&bsd_ring->request_list, - struct drm_i915_gem_request, - list); - - ret = i915_wait_request(dev, - request->seqno, request->ring); - if (ret) - return ret; - - continue; - } - - /* If we didn't have anything on the request list but there - * are buffers awaiting a flush, emit one and try again. - * When we wait on it, those buffers waiting for that flush - * will get moved to inactive. 
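The removals above take the old single-file eviction loop out of i915_gem.c; its "wait for the oldest outstanding request, then rescan" step reduces to roughly the sketch below. The wrapper name is hypothetical, the calls are the ones visible in the removed code, and the reworked replacement lives in the new i915_gem_evict.c further down.

        /* Sketch of the retry step removed above: block on the oldest request
         * of a ring so that its buffers retire onto the inactive list. */
        static int wait_for_oldest_request(struct drm_device *dev,
                                           struct intel_ring_buffer *ring)
        {
                struct drm_i915_gem_request *request;

                if (list_empty(&ring->request_list))
                        return -ENOENT;         /* nothing outstanding on this ring */

                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request, list);

                return i915_wait_request(dev, request->seqno, request->ring);
        }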
- */ - if (!list_empty(&dev_priv->mm.flushing_list)) { - struct drm_i915_gem_object *obj_priv; - - /* Find an object that we can immediately reuse */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { - obj = &obj_priv->base; - if (obj->size >= min_size) - break; - - obj = NULL; - } - - if (obj != NULL) { - uint32_t seqno; - - i915_gem_flush_ring(dev, - obj->write_domain, - obj->write_domain, - obj_priv->ring); - seqno = i915_add_request(dev, NULL, - obj->write_domain, - obj_priv->ring); - if (seqno == 0) - return -ENOMEM; - continue; - } - } - - /* If we didn't do any of the above, there's no single buffer - * large enough to swap out for the new one, so just evict - * everything and start again. (This should be rare.) - */ - if (!list_empty (&dev_priv->mm.inactive_list)) - return i915_gem_evict_from_inactive_list(dev); - else - return i915_gem_evict_everything(dev); - } -} - int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) @@ -2666,7 +2498,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) #if WATCH_LRU DRM_INFO("%s: GTT full, evicting something\n", __func__); #endif - ret = i915_gem_evict_something(dev, obj->size); + ret = i915_gem_evict_something(dev, obj->size, alignment); if (ret) return ret; @@ -2684,7 +2516,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ - ret = i915_gem_evict_something(dev, obj->size); + ret = i915_gem_evict_something(dev, obj->size, + alignment); if (ret) { /* now try to shrink everyone else */ if (gfpmask) { @@ -2714,7 +2547,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; - ret = i915_gem_evict_something(dev, obj->size); + ret = i915_gem_evict_something(dev, obj->size, alignment); if (ret) return ret; @@ -2723,6 +2556,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) atomic_inc(&dev->gtt_count); atomic_add(obj->size, &dev->gtt_memory); + /* keep track of bounds object by adding it to the inactive list */ + list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + /* Assert that the object is not currently in any GPU domain. 
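The bind-path hunks above thread the caller's alignment through to i915_gem_evict_something(), so the hole carved out by eviction is actually usable for the object being bound. A minimal sketch of that retry shape; find_gtt_space() is a hypothetical stand-in for the drm_mm search done inside the real bind function:

        static bool find_gtt_space(struct drm_gem_object *obj,
                                   unsigned alignment);  /* hypothetical helper */

        /* Sketch: retry the GTT search after carving out a hole that honours
         * both the object's size and its required alignment. */
        static int bind_retry_sketch(struct drm_gem_object *obj, unsigned alignment)
        {
                struct drm_device *dev = obj->dev;
                int ret;

        search_free:
                if (!find_gtt_space(obj, alignment)) {
                        ret = i915_gem_evict_something(dev, obj->size, alignment);
                        if (ret)
                                return ret;
                        goto search_free;
                }
                return 0;
        }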
As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache @@ -3117,6 +2953,7 @@ static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; @@ -3179,6 +3016,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) obj->pending_write_domain = obj->write_domain; obj->read_domains = obj->pending_read_domains; + if (flush_domains & I915_GEM_GPU_DOMAINS) { + if (obj_priv->ring == &dev_priv->render_ring) + dev_priv->flush_rings |= FLUSH_RENDER_RING; + else if (obj_priv->ring == &dev_priv->bsd_ring) + dev_priv->flush_rings |= FLUSH_BSD_RING; + } + dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; #if WATCH_BUF @@ -3718,7 +3562,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ring = &dev_priv->render_ring; } - if (args->buffer_count < 1) { DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; @@ -3746,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret != 0) { DRM_ERROR("copy %d cliprects failed: %d\n", args->num_cliprects, ret); + ret = -EFAULT; goto pre_mutex_err; } } @@ -3892,6 +3736,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ dev->invalidate_domains = 0; dev->flush_domains = 0; + dev_priv->flush_rings = 0; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -3912,16 +3757,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains); - if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { + if (dev_priv->flush_rings & FLUSH_RENDER_RING) (void)i915_add_request(dev, file_priv, - dev->flush_domains, - &dev_priv->render_ring); - - if (HAS_BSD(dev)) - (void)i915_add_request(dev, file_priv, - dev->flush_domains, - &dev_priv->bsd_ring); - } + dev->flush_domains, + &dev_priv->render_ring); + if (dev_priv->flush_rings & FLUSH_BSD_RING) + (void)i915_add_request(dev, file_priv, + dev->flush_domains, + &dev_priv->bsd_ring); } for (i = 0; i < args->buffer_count; i++) { @@ -4192,6 +4035,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (alignment == 0) alignment = i915_gem_get_gtt_alignment(obj); if (obj_priv->gtt_offset & (alignment - 1)) { + WARN(obj_priv->pin_count, + "bo is already pinned with incorrect alignment:" + " offset=%x, req.alignment=%x\n", + obj_priv->gtt_offset, alignment); ret = i915_gem_object_unbind(obj); if (ret) return ret; @@ -4213,8 +4060,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) atomic_inc(&dev->pin_count); atomic_add(obj->size, &dev->pin_memory); if (!obj_priv->active && - (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 && - !list_empty(&obj_priv->list)) + (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) list_del_init(&obj_priv->list); } i915_verify_inactive(dev, __FILE__, __LINE__); @@ -4359,22 +4205,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - /* Update the active list for the hardware's current position. - * Otherwise this only updates on a delayed timer or when irqs are - * actually unmasked, and our working set ends up being larger than - * required. 
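Two hunks above cooperate: i915_gem_object_set_to_gpu_domain() now records in dev_priv->flush_rings which ring each flushed object lives on, and the execbuffer path emits a request only on rings that actually saw a flush (previously a BSD request was emitted whenever the hardware had a BSD ring at all). Condensed, the bookkeeping is the pair of fragments below, taken directly from the hunks:

        /* Producer side (per object, while computing GPU domains). */
        if (flush_domains & I915_GEM_GPU_DOMAINS) {
                if (obj_priv->ring == &dev_priv->render_ring)
                        dev_priv->flush_rings |= FLUSH_RENDER_RING;
                else if (obj_priv->ring == &dev_priv->bsd_ring)
                        dev_priv->flush_rings |= FLUSH_BSD_RING;
        }

        /* Consumer side (once per execbuffer, after i915_gem_flush()). */
        if (dev_priv->flush_rings & FLUSH_RENDER_RING)
                (void)i915_add_request(dev, file_priv, dev->flush_domains,
                                       &dev_priv->render_ring);
        if (dev_priv->flush_rings & FLUSH_BSD_RING)
                (void)i915_add_request(dev, file_priv, dev->flush_domains,
                                       &dev_priv->bsd_ring);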
- */ - i915_gem_retire_requests(dev); - obj_priv = to_intel_bo(obj); - /* Don't count being on the flushing list against the object being - * done. Otherwise, a buffer left on the flushing list but not getting - * flushed (because nobody's flushing that domain) won't ever return - * unbusy and get reused by libdrm's bo cache. The other expected - * consumer of this interface, OpenGL's occlusion queries, also specs - * that the objects get unbusy "eventually" without any interference. + /* Count all active objects as busy, even if they are currently not used + * by the gpu. Users of this interface expect objects to eventually + * become non-busy without any further actions, therefore emit any + * necessary flushes here. */ - args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; + obj_priv = to_intel_bo(obj); + args->busy = obj_priv->active; + if (args->busy) { + /* Unconditionally flush objects, even when the gpu still uses this + * object. Userspace calling this function indicates that it wants to + * use this buffer rather sooner than later, so issuing the required + * flush earlier is beneficial. + */ + if (obj->write_domain) { + i915_gem_flush(dev, 0, obj->write_domain); + (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); + } + + /* Update the active list for the hardware's current position. + * Otherwise this only updates on a delayed timer or when irqs + * are actually unmasked, and our working set ends up being + * larger than required. + */ + i915_gem_retire_requests_ring(dev, obj_priv->ring); + + args->busy = obj_priv->active; + } drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); @@ -4514,30 +4372,6 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_free_object_tail(obj); } -/** Unbinds all inactive objects. */ -static int -i915_gem_evict_from_inactive_list(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - while (!list_empty(&dev_priv->mm.inactive_list)) { - struct drm_gem_object *obj; - int ret; - - obj = &list_first_entry(&dev_priv->mm.inactive_list, - struct drm_i915_gem_object, - list)->base; - - ret = i915_gem_object_unbind(obj); - if (ret != 0) { - DRM_ERROR("Error unbinding object: %d\n", ret); - return ret; - } - } - - return 0; -} - int i915_gem_idle(struct drm_device *dev) { @@ -4562,7 +4396,7 @@ i915_gem_idle(struct drm_device *dev) /* Under UMS, be paranoid and evict. */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_gem_evict_from_inactive_list(dev); + ret = i915_gem_evict_inactive(dev); if (ret) { mutex_unlock(&dev->struct_mutex); return ret; @@ -4680,6 +4514,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev) goto cleanup_render_ring; } + dev_priv->next_seqno = 1; + return 0; cleanup_render_ring: @@ -4841,7 +4677,7 @@ i915_gem_load(struct drm_device *dev) * e.g. 
for cursor + overlay regs */ int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size) + int id, int size, int align) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; @@ -4856,7 +4692,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, phys_obj->id = id; - phys_obj->handle = drm_pci_alloc(dev, size, 0); + phys_obj->handle = drm_pci_alloc(dev, size, align); if (!phys_obj->handle) { ret = -ENOMEM; goto kfree_obj; @@ -4938,7 +4774,9 @@ out: int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, int id) + struct drm_gem_object *obj, + int id, + int align) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; @@ -4957,11 +4795,10 @@ i915_gem_attach_phys_object(struct drm_device *dev, i915_gem_detach_phys_object(dev, obj); } - /* create a new object */ if (!dev_priv->mm.phys_objs[id - 1]) { ret = i915_gem_init_phys_object(dev, id, - obj->size); + obj->size, align); if (ret) { DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); goto out; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c new file mode 100644 index 00000000000..72cae3cccad --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -0,0 +1,271 @@ +/* + * Copyright © 2008-2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Chris Wilson <chris@chris-wilson.co.uuk> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drv.h" +#include "i915_drm.h" + +static struct drm_i915_gem_object * +i915_gem_next_active_object(struct drm_device *dev, + struct list_head **render_iter, + struct list_head **bsd_iter) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; + + if (*render_iter != &dev_priv->render_ring.active_list) + render_obj = list_entry(*render_iter, + struct drm_i915_gem_object, + list); + + if (HAS_BSD(dev)) { + if (*bsd_iter != &dev_priv->bsd_ring.active_list) + bsd_obj = list_entry(*bsd_iter, + struct drm_i915_gem_object, + list); + + if (render_obj == NULL) { + *bsd_iter = (*bsd_iter)->next; + return bsd_obj; + } + + if (bsd_obj == NULL) { + *render_iter = (*render_iter)->next; + return render_obj; + } + + /* XXX can we handle seqno wrapping? 
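On the "XXX can we handle seqno wrapping?" question just above: the comparison as written breaks once the 32-bit sequence counter wraps. A common wrap-safe idiom, offered here as a side note and not something this patch adds, compares by signed difference, which stays correct as long as the two values are less than 2^31 apart:

        /* Wrap-safe "a was issued before b" for 32-bit seqnos. */
        static inline bool seqno_before(u32 a, u32 b)
        {
                return (s32)(a - b) < 0;
        }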
*/ + if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { + *render_iter = (*render_iter)->next; + return render_obj; + } else { + *bsd_iter = (*bsd_iter)->next; + return bsd_obj; + } + } else { + *render_iter = (*render_iter)->next; + return render_obj; + } +} + +static bool +mark_free(struct drm_i915_gem_object *obj_priv, + struct list_head *unwind) +{ + list_add(&obj_priv->evict_list, unwind); + return drm_mm_scan_add_block(obj_priv->gtt_space); +} + +#define i915_for_each_active_object(OBJ, R, B) \ + *(R) = dev_priv->render_ring.active_list.next; \ + *(B) = dev_priv->bsd_ring.active_list.next; \ + while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) + +int +i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct list_head eviction_list, unwind_list; + struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; + struct list_head *render_iter, *bsd_iter; + int ret = 0; + + i915_gem_retire_requests(dev); + + /* Re-check for free space after retiring requests */ + if (drm_mm_search_free(&dev_priv->mm.gtt_space, + min_size, alignment, 0)) + return 0; + + /* + * The goal is to evict objects and amalgamate space in LRU order. + * The oldest idle objects reside on the inactive list, which is in + * retirement order. The next objects to retire are those on the (per + * ring) active list that do not have an outstanding flush. Once the + * hardware reports completion (the seqno is updated after the + * batchbuffer has been finished) the clean buffer objects would + * be retired to the inactive list. Any dirty objects would be added + * to the tail of the flushing list. So after processing the clean + * active objects we need to emit a MI_FLUSH to retire the flushing + * list, hence the retirement order of the flushing list is in + * advance of the dirty objects on the active lists. + * + * The retirement sequence is thus: + * 1. Inactive objects (already retired) + * 2. Clean active objects + * 3. Flushing list + * 4. Dirty active objects. + * + * On each list, the oldest objects lie at the HEAD with the freshest + * object on the TAIL. + */ + + INIT_LIST_HEAD(&unwind_list); + drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); + + /* First see if there is a large enough contiguous idle region... */ + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + if (mark_free(obj_priv, &unwind_list)) + goto found; + } + + /* Now merge in the soon-to-be-expired objects... */ + i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + /* Does the object require an outstanding flush? */ + if (obj_priv->base.write_domain || obj_priv->pin_count) + continue; + + if (mark_free(obj_priv, &unwind_list)) + goto found; + } + + /* Finally add anything with a pending flush (in order of retirement) */ + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + if (obj_priv->pin_count) + continue; + + if (mark_free(obj_priv, &unwind_list)) + goto found; + } + i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + if (! obj_priv->base.write_domain || obj_priv->pin_count) + continue; + + if (mark_free(obj_priv, &unwind_list)) + goto found; + } + + /* Nothing found, clean up and bail out! */ + list_for_each_entry(obj_priv, &unwind_list, evict_list) { + ret = drm_mm_scan_remove_block(obj_priv->gtt_space); + BUG_ON(ret); + } + + /* We expect the caller to unpin, evict all and try again, or give up. 
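The eviction routine above is built on the drm_mm scanning API reworked earlier in this diff. The protocol has a strict shape: every block fed to the scan must be handed back through drm_mm_scan_remove_block() before any other drm_mm operation, and only the blocks for which that call returns true belong to the hole. A stripped-down sketch, with the LRU candidate ordering, locking and error paths of the real routine elided:

        /* Sketch of the drm_mm scan protocol used above; "candidates" is a
         * caller-built list of objects linked through evict_list. */
        static int evict_scan_sketch(struct drm_device *dev, int min_size,
                                     unsigned alignment, struct list_head *candidates)
        {
                drm_i915_private_t *dev_priv = dev->dev_private;
                struct drm_i915_gem_object *obj_priv, *tmp;
                LIST_HEAD(scanned);
                LIST_HEAD(evict);
                bool found = false;
                int ret = 0;

                drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

                /* Phase 1: feed candidates, best victims first, until the scan
                 * reports that the accumulated hole is large enough. */
                list_for_each_entry_safe(obj_priv, tmp, candidates, evict_list) {
                        list_move(&obj_priv->evict_list, &scanned);
                        if (drm_mm_scan_add_block(obj_priv->gtt_space)) {
                                found = true;
                                break;
                        }
                }

                /* Phase 2: every scanned block must be removed from the scan;
                 * drm_mm_scan_remove_block() returns true only for the blocks
                 * that make up the hole, and no other drm_mm operation is
                 * allowed until the scan is fully unwound, so just collect. */
                list_for_each_entry_safe(obj_priv, tmp, &scanned, evict_list) {
                        if (drm_mm_scan_remove_block(obj_priv->gtt_space) && found)
                                list_move(&obj_priv->evict_list, &evict);
                }

                /* Phase 3: now it is safe to unbind, which frees the space. */
                list_for_each_entry_safe(obj_priv, tmp, &evict, evict_list) {
                        ret = i915_gem_object_unbind(&obj_priv->base);
                        if (ret)
                                break;
                }

                return found ? ret : -ENOSPC;
        }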
+ * So calling i915_gem_evict_everything() is unnecessary. + */ + return -ENOSPC; + +found: + INIT_LIST_HEAD(&eviction_list); + list_for_each_entry_safe(obj_priv, tmp_obj_priv, + &unwind_list, evict_list) { + if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { + /* drm_mm doesn't allow any other other operations while + * scanning, therefore store to be evicted objects on a + * temporary list. */ + list_move(&obj_priv->evict_list, &eviction_list); + } + } + + /* Unbinding will emit any required flushes */ + list_for_each_entry_safe(obj_priv, tmp_obj_priv, + &eviction_list, evict_list) { +#if WATCH_LRU + DRM_INFO("%s: evicting %p\n", __func__, obj); +#endif + ret = i915_gem_object_unbind(&obj_priv->base); + if (ret) + return ret; + } + + /* The just created free hole should be on the top of the free stack + * maintained by drm_mm, so this BUG_ON actually executes in O(1). + * Furthermore all accessed data has just recently been used, so it + * should be really fast, too. */ + BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, + alignment, 0)); + + return 0; +} + +int +i915_gem_evict_everything(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + bool lists_empty; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->render_ring.active_list) && + (!HAS_BSD(dev) + || list_empty(&dev_priv->bsd_ring.active_list))); + spin_unlock(&dev_priv->mm.active_list_lock); + + if (lists_empty) + return -ENOSPC; + + /* Flush everything (on to the inactive lists) and evict */ + ret = i915_gpu_idle(dev); + if (ret) + return ret; + + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + + ret = i915_gem_evict_inactive(dev); + if (ret) + return ret; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->render_ring.active_list) && + (!HAS_BSD(dev) + || list_empty(&dev_priv->bsd_ring.active_list))); + spin_unlock(&dev_priv->mm.active_list_lock); + BUG_ON(!lists_empty); + + return 0; +} + +/** Unbinds all inactive objects. 
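When even the scan cannot assemble a large enough hole, callers fall back to evicting everything. The essential ordering is: idle the GPU first, so that active and flushing objects retire onto the inactive list, then unbind that list. A minimal sketch of the fallback, with the list-empty bookkeeping and locking of the real function omitted:

        /* Sketch: the full-eviction fallback reduces to two steps. */
        static int evict_all_sketch(struct drm_device *dev)
        {
                int ret;

                ret = i915_gpu_idle(dev);       /* flush and wait on every ring */
                if (ret)
                        return ret;

                /* Everything has retired to the inactive list; unbind it all. */
                return i915_gem_evict_inactive(dev);
        }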
*/ +int +i915_gem_evict_inactive(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + while (!list_empty(&dev_priv->mm.inactive_list)) { + struct drm_gem_object *obj; + int ret; + + obj = &list_first_entry(&dev_priv->mm.inactive_list, + struct drm_i915_gem_object, + list)->base; + + ret = i915_gem_object_unbind(obj); + if (ret != 0) { + DRM_ERROR("Error unbinding object: %d\n", ret); + return ret; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85785a8844e..59457e83b01 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -425,9 +425,11 @@ static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, struct drm_gem_object *src) { + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_object *dst; struct drm_i915_gem_object *src_priv; int page, page_count; + u32 reloc_offset; if (src == NULL) return NULL; @@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev, if (dst == NULL) return NULL; + reloc_offset = src_priv->gtt_offset; for (page = 0; page < page_count; page++) { - void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); unsigned long flags; + void __iomem *s; + void *d; + d = kmalloc(PAGE_SIZE, GFP_ATOMIC); if (d == NULL) goto unwind; + local_irq_save(flags); - s = kmap_atomic(src_priv->pages[page], KM_IRQ0); - memcpy(d, s, PAGE_SIZE); - kunmap_atomic(s, KM_IRQ0); + s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc_offset, + KM_IRQ0); + memcpy_fromio(d, s, PAGE_SIZE); + io_mapping_unmap_atomic(s, KM_IRQ0); local_irq_restore(flags); + dst->pages[page] = d; + + reloc_offset += PAGE_SIZE; } dst->page_count = page_count; dst->gtt_offset = src_priv->gtt_offset; @@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev, i915_error_object_free(error->batchbuffer[1]); i915_error_object_free(error->ringbuffer); kfree(error->active_bo); + kfree(error->overlay); kfree(error); } @@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev) if (batchbuffer[1] == NULL && error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size && - batchbuffer[0] != obj) + error->acthd < obj_priv->gtt_offset + obj->size) batchbuffer[1] = obj; count++; } + /* Scan the other lists for completeness for those bizarre errors. */ + if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + struct drm_gem_object *obj = &obj_priv->base; + + if (batchbuffer[0] == NULL && + bbaddr >= obj_priv->gtt_offset && + bbaddr < obj_priv->gtt_offset + obj->size) + batchbuffer[0] = obj; + + if (batchbuffer[1] == NULL && + error->acthd >= obj_priv->gtt_offset && + error->acthd < obj_priv->gtt_offset + obj->size) + batchbuffer[1] = obj; + + if (batchbuffer[0] && batchbuffer[1]) + break; + } + } + if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + struct drm_gem_object *obj = &obj_priv->base; + + if (batchbuffer[0] == NULL && + bbaddr >= obj_priv->gtt_offset && + bbaddr < obj_priv->gtt_offset + obj->size) + batchbuffer[0] = obj; + + if (batchbuffer[1] == NULL && + error->acthd >= obj_priv->gtt_offset && + error->acthd < obj_priv->gtt_offset + obj->size) + batchbuffer[1] = obj; + + if (batchbuffer[0] && batchbuffer[1]) + break; + } + } /* We need to copy these to an anonymous buffer as the simplest * method to avoid being overwritten by userspace. 
*/ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); - error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); + if (batchbuffer[1] != batchbuffer[0]) + error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); + else + error->batchbuffer[1] = NULL; /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, @@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev) do_gettimeofday(&error->time); + error->overlay = intel_overlay_capture_error_state(dev); + spin_lock_irqsave(&dev_priv->error_lock, flags); if (dev_priv->first_error == NULL) { dev_priv->first_error = error; @@ -834,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) queue_work(dev_priv->wq, &dev_priv->error_work); } +static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj_priv; + struct intel_unpin_work *work; + unsigned long flags; + bool stall_detected; + + /* Ignore early vblank irqs */ + if (intel_crtc == NULL) + return; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + + if (work == NULL || work->pending || !work->enable_stall_check) { + /* Either the pending flip IRQ arrived, or we're too early. Don't check */ + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } + + /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ + obj_priv = to_intel_bo(work->pending_flip_obj); + if(IS_I965G(dev)) { + int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; + stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; + } else { + int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; + stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + + crtc->y * crtc->fb->pitch + + crtc->x * crtc->fb->bits_per_pixel/8); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (stall_detected) { + DRM_DEBUG_DRIVER("Pageflip stall detected\n"); + intel_prepare_page_flip(dev, intel_crtc->plane); + } +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -951,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) if (pipea_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 0); - if (!dev_priv->flip_pending_is_done) + if (!dev_priv->flip_pending_is_done) { + i915_pageflip_stall_check(dev, 0); intel_finish_page_flip(dev, 0); + } } if (pipeb_stats & vblank_status) { vblank++; drm_handle_vblank(dev, 1); - if (!dev_priv->flip_pending_is_done) + if (!dev_priv->flip_pending_is_done) { + i915_pageflip_stall_check(dev, 1); intel_finish_page_flip(dev, 1); + } } if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || @@ -1251,6 +1351,16 @@ void i915_hangcheck_elapsed(unsigned long data) &dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) { dev_priv->hangcheck_count = 0; + + /* Issue a wake-up to catch stuck h/w. */ + if (dev_priv->render_ring.waiting_gem_seqno | + dev_priv->bsd_ring.waiting_gem_seqno) { + DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); + if (dev_priv->render_ring.waiting_gem_seqno) + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + if (dev_priv->bsd_ring.waiting_gem_seqno) + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + } return; } @@ -1318,12 +1428,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); (void) I915_READ(DEIER); - /* user interrupt should be enabled, but masked initial */ + /* Gen6 only needs render pipe_control now */ + if (IS_GEN6(dev)) + render_mask = GT_PIPE_NOTIFY; + dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + if (IS_GEN6(dev)) + I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); (void) I915_READ(GTIER); diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index 8fcc75c1aa2..ea5d3fea4b6 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c @@ -114,10 +114,6 @@ struct opregion_asle { #define ASLE_REQ_MSK 0xf /* response bits of ASLE irq request */ -#define ASLE_ALS_ILLUM_FAIL (2<<10) -#define ASLE_BACKLIGHT_FAIL (2<<12) -#define ASLE_PFIT_FAIL (2<<14) -#define ASLE_PWM_FREQ_FAIL (2<<16) #define ASLE_ALS_ILLUM_FAILED (1<<10) #define ASLE_BACKLIGHT_FAILED (1<<12) #define ASLE_PFIT_FAILED (1<<14) @@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) u32 max_backlight, level, shift; if (!(bclp & ASLE_BCLP_VALID)) - return ASLE_BACKLIGHT_FAIL; + return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; if (bclp < 0 || bclp > 255) - return ASLE_BACKLIGHT_FAIL; + return ASLE_BACKLIGHT_FAILED; blc_pwm_ctl = I915_READ(BLC_PWM_CTL); blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); @@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ if (!(pfit & ASLE_PFIT_VALID)) - return ASLE_PFIT_FAIL; + return ASLE_PFIT_FAILED; return 0; } @@ -535,6 +531,7 @@ int intel_opregion_init(struct drm_device *dev, int resume) err_out: iounmap(opregion->header); opregion->header = NULL; + acpi_video_register(); return err; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 281db6e5403..d094e912922 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -170,6 +170,7 @@ #define MI_NO_WRITE_FLUSH (1 << 2) #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) #define MI_REPORT_HEAD MI_INSTR(0x07, 0) #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) @@ -180,6 +181,12 @@ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +#define MI_SET_CONTEXT MI_INSTR(0x18, 0) +#define MI_MM_SPACE_GTT (1<<8) +#define MI_MM_SPACE_PHYSICAL (0<<8) +#define MI_SAVE_EXT_STATE_EN (1<<3) +#define MI_RESTORE_EXT_STATE_EN (1<<2) +#define MI_RESTORE_INHIBIT (1<<0) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) @@ -312,6 +319,7 @@ #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) +# define MI_FLUSH_ENABLE (1 << 11) 
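The MI_SET_CONTEXT opcode and the address-dword flags added to i915_reg.h above are consumed later in this patch, where intel_init_clock_gating() emits a context-save command so the render unit can power itself down when given a page to save state to. A minimal sketch of how the two dwords fit together; emit_set_context() and the bare ring pointer are illustrative only, the patch itself emits them through BEGIN_LP_RING()/OUT_RING():

static void emit_set_context(u32 *ring, u32 ctx_gtt_offset)
{
	/* First dword: the MI_SET_CONTEXT command itself. */
	ring[0] = MI_SET_CONTEXT;
	/* Second dword: GTT offset of the context page, plus the new flag
	 * bits - treat the address as a GTT offset, save/restore extended
	 * state, and inhibit the restore while the page is still blank. */
	ring[1] = ctx_gtt_offset |
		  MI_MM_SPACE_GTT |
		  MI_SAVE_EXT_STATE_EN |
		  MI_RESTORE_EXT_STATE_EN |
		  MI_RESTORE_INHIBIT;
}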
#define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 @@ -1100,6 +1108,11 @@ #define PEG_BAND_GAP_DATA 0x14d68 /* + * Logical Context regs + */ +#define CCID 0x2180 +#define CCID_EN (1<<0) +/* * Overlay regs */ @@ -2069,6 +2082,7 @@ #define PIPE_DITHER_TYPE_ST01 (1 << 2) /* Pipe A */ #define PIPEADSL 0x70000 +#define DSL_LINEMASK 0x00000fff #define PIPEACONF 0x70008 #define PIPEACONF_ENABLE (1<<31) #define PIPEACONF_DISABLE 0 @@ -2928,6 +2942,7 @@ #define TRANS_DP_VSYNC_ACTIVE_LOW 0 #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) #define TRANS_DP_HSYNC_ACTIVE_LOW 0 +#define TRANS_DP_SYNC_MASK (3<<3) /* SNB eDP training params */ /* SNB A-stepping */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 6e2025274db..2c6b98f2440 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; } else { dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; @@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) if (!i915_pipe_enabled(dev, pipe)) return; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; if (pipe == PIPE_A) @@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) if (!i915_pipe_enabled(dev, pipe)) return; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; if (pipe == PIPE_A) @@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } @@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); dev_priv->savePIPEASRC = I915_READ(PIPEASRC); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->saveFPA0 = I915_READ(PCH_FPA0); dev_priv->saveFPA1 = I915_READ(PCH_FPA1); dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); @@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPA1 = I915_READ(FPA1); dev_priv->saveDPLL_A = I915_READ(DPLL_A); } - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); @@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); @@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) /* Pipe & plane B info */ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); - if (IS_IRONLAKE(dev)) { + if 
(HAS_PCH_SPLIT(dev)) { dev_priv->saveFPB0 = I915_READ(PCH_FPB0); dev_priv->saveFPB1 = I915_READ(PCH_FPB1); dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); @@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPB1 = I915_READ(FPB1); dev_priv->saveDPLL_B = I915_READ(DPLL_B); } - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); @@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); @@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = PCH_DPLL_A; dpll_b_reg = PCH_DPLL_B; fpa0_reg = PCH_FPA0; @@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) fpb1_reg = FPB1; } - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } @@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); + POSTING_READ(dpll_a_reg); + udelay(150); } I915_WRITE(fpa0_reg, dev_priv->saveFPA0); I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* Actually enable it */ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); - DRM_UDELAY(150); - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + POSTING_READ(dpll_a_reg); + udelay(150); + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); - DRM_UDELAY(150); + POSTING_READ(DPLL_A_MD); + } + udelay(150); /* Restore mode */ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); @@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); @@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); + POSTING_READ(dpll_b_reg); + udelay(150); } I915_WRITE(fpb0_reg, dev_priv->saveFPB0); I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); - DRM_UDELAY(150); - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + POSTING_READ(dpll_b_reg); + udelay(150); + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - DRM_UDELAY(150); + POSTING_READ(DPLL_B_MD); + } 
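Throughout the DPLL restore path above, DRM_UDELAY(150) is replaced by a POSTING_READ() of the register just written followed by a plain udelay(150): reading the register back forces the posted write out to the hardware, so the fixed warm-up delay only starts once the new DPLL value has actually landed. A minimal sketch of the pattern, assuming a hypothetical write_and_settle() helper rather than anything introduced by the patch (the parameter is named dev_priv because the I915_WRITE()/POSTING_READ() macros reference it implicitly):

static void write_and_settle(struct drm_i915_private *dev_priv,
			     u32 reg, u32 val, unsigned int usecs)
{
	I915_WRITE(reg, val);	/* the write may be posted/buffered */
	POSTING_READ(reg);	/* read back to force it to the hardware */
	udelay(usecs);		/* settle time measured from here */
}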
+ udelay(150); /* Restore mode */ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); @@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); @@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveCURSIZE = I915_READ(CURSIZE); /* CRT state */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); } else { dev_priv->saveADPA = I915_READ(ADPA); } /* LVDS state */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); @@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveLVDS = I915_READ(LVDS); } - if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); @@ -602,7 +610,7 @@ void i915_save_display(struct drm_device *dev) /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { - if (IS_IRONLAKE_M(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); @@ -618,7 +626,7 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveVGA0 = I915_READ(VGA0); dev_priv->saveVGA1 = I915_READ(VGA1); dev_priv->saveVGA_PD = I915_READ(VGA_PD); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); else dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); @@ -660,24 +668,24 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); /* CRT state */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); else I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) I915_WRITE(LVDS, dev_priv->saveLVDS); - if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); @@ -708,7 +716,7 @@ void i915_restore_display(struct drm_device *dev) /* only restore FBC info on the platform that supports FBC*/ if (I915_HAS_FBC(dev)) { - if (IS_IRONLAKE_M(dev)) { + if (HAS_PCH_SPLIT(dev)) { 
ironlake_disable_fbc(dev); I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { @@ -723,14 +731,15 @@ void i915_restore_display(struct drm_device *dev) } } /* VGA state */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); I915_WRITE(VGA0, dev_priv->saveVGA0); I915_WRITE(VGA1, dev_priv->saveVGA1); I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); - DRM_UDELAY(150); + POSTING_READ(VGA_PD); + udelay(150); i915_restore_vga(dev); } @@ -748,7 +757,7 @@ int i915_save_state(struct drm_device *dev) i915_save_display(dev); /* Interrupt state */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->saveDEIER = I915_READ(DEIER); dev_priv->saveDEIMR = I915_READ(DEIMR); dev_priv->saveGTIER = I915_READ(GTIER); @@ -762,7 +771,7 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveIMR = I915_READ(IMR); } - if (IS_IRONLAKE_M(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_disable_drps(dev); /* Cache mode state */ @@ -820,7 +829,7 @@ int i915_restore_state(struct drm_device *dev) i915_restore_display(dev); /* Interrupt state */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(DEIER, dev_priv->saveDEIER); I915_WRITE(DEIMR, dev_priv->saveDEIMR); I915_WRITE(GTIER, dev_priv->saveGTIER); @@ -835,7 +844,7 @@ int i915_restore_state(struct drm_device *dev) /* Clock gating state */ intel_init_clock_gating(dev); - if (IS_IRONLAKE_M(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_drps(dev); /* Cache mode state */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ee0732b222a..4b7735196cd 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) struct drm_i915_private *dev_priv = dev->dev_private; u32 adpa, temp; bool ret; + bool turn_off_dac = false; temp = adpa = I915_READ(PCH_ADPA); - if (HAS_PCH_CPT(dev)) { - /* Disable DAC before force detect */ - I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); - (void)I915_READ(PCH_ADPA); - } else { - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); - } + if (HAS_PCH_SPLIT(dev)) + turn_off_dac = true; + + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + if (turn_off_dac) + adpa &= ~ADPA_DAC_ENABLE; + + /* disable HPD first */ + I915_WRITE(PCH_ADPA, adpa); + (void)I915_READ(PCH_ADPA); adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | ADPA_CRT_HOTPLUG_WARMUP_10MS | @@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); I915_WRITE(PCH_ADPA, adpa); - while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) - ; + if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000, 1)) + DRM_ERROR("timed out waiting for FORCE_TRIGGER"); - if (HAS_PCH_CPT(dev)) { + if (turn_off_dac) { I915_WRITE(PCH_ADPA, temp); (void)I915_READ(PCH_ADPA); } @@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; for (i = 0; i < tries ; i++) { - unsigned long timeout; /* turn on the FORCE_DETECT */ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - timeout = jiffies + msecs_to_jiffies(1000); /* wait for FORCE_DETECT to go off */ - do { - if (!(I915_READ(PORT_HOTPLUG_EN) & - CRT_HOTPLUG_FORCE_DETECT)) - break; - msleep(1); - } while (time_after(timeout, jiffies)); + if 
(wait_for((I915_READ(PORT_HOTPLUG_EN) & + CRT_HOTPLUG_FORCE_DETECT) == 0, + 1000, 1)) + DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); } stat = I915_READ(PORT_HOTPLUG_STAT); @@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); /* Wait for next Vblank to substitute * border color for Color info */ - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, pipe); st00 = I915_READ8(VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? connector_status_connected : @@ -508,17 +506,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs .best_encoder = intel_attached_encoder, }; -static void intel_crt_enc_destroy(struct drm_encoder *encoder) -{ - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - static const struct drm_encoder_funcs intel_crt_enc_funcs = { - .destroy = intel_crt_enc_destroy, + .destroy = intel_encoder_destroy, }; void intel_crt_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ec10e02341..40cc5da264a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/vgaarb.h> #include "drmP.h" #include "intel_drv.h" #include "i915_drm.h" @@ -976,14 +977,70 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, return true; } -void -intel_wait_for_vblank(struct drm_device *dev) +/** + * intel_wait_for_vblank - wait for vblank on a given pipe + * @dev: drm device + * @pipe: pipe to wait for + * + * Wait for vblank to occur on a given pipe. Needed for various bits of + * mode setting code. + */ +void intel_wait_for_vblank(struct drm_device *dev, int pipe) { - /* Wait for 20ms, i.e. one cycle at 50hz. */ - if (in_dbg_master()) - mdelay(20); /* The kernel debugger cannot call msleep() */ - else - msleep(20); + struct drm_i915_private *dev_priv = dev->dev_private; + int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); + + /* Clear existing vblank status. Note this will clear any other + * sticky status fields as well. + * + * This races with i915_driver_irq_handler() with the result + * that either function could miss a vblank event. Here it is not + * fatal, as we will either wait upon the next vblank interrupt or + * timeout. Generally speaking intel_wait_for_vblank() is only + * called during modeset at which time the GPU should be idle and + * should *not* be performing page flips and thus not waiting on + * vblanks... + * Currently, the result of us stealing a vblank from the irq + * handler is that a single frame will be skipped during swapbuffers. + */ + I915_WRITE(pipestat_reg, + I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); + + /* Wait for vblank interrupt bit to set */ + if (wait_for((I915_READ(pipestat_reg) & + PIPE_VBLANK_INTERRUPT_STATUS), + 50, 0)) + DRM_DEBUG_KMS("vblank wait timed out\n"); +} + +/** + * intel_wait_for_vblank_off - wait for vblank after disabling a pipe + * @dev: drm device + * @pipe: pipe to wait for + * + * After disabling a pipe, we can't wait for vblank in the usual way, + * spinning on the vblank interrupt status bit, since we won't actually + * see an interrupt when the pipe is disabled. 
+ * + * So this function waits for the display line value to settle (it + * usually ends up stopping at the start of the next frame). + */ +void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u32 last_line; + + /* Wait for the display line to settle */ + do { + last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; + mdelay(5); + } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && + time_after(timeout, jiffies)); + + if (time_after(jiffies, timeout)) + DRM_DEBUG_KMS("vblank wait timed out\n"); } /* Parameters have changed, update FBC info */ @@ -1037,7 +1094,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) void i8xx_disable_fbc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long timeout = jiffies + msecs_to_jiffies(1); u32 fbc_ctl; if (!I915_HAS_FBC(dev)) @@ -1052,16 +1108,11 @@ void i8xx_disable_fbc(struct drm_device *dev) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { - if (time_after(jiffies, timeout)) { - DRM_DEBUG_DRIVER("FBC idle timed out\n"); - break; - } - ; /* do nothing */ + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { + DRM_DEBUG_KMS("FBC idle timed out\n"); + return; } - intel_wait_for_vblank(dev); - DRM_DEBUG_KMS("disabled FBC\n"); } @@ -1118,7 +1169,6 @@ void g4x_disable_fbc(struct drm_device *dev) dpfc_ctl = I915_READ(DPFC_CONTROL); dpfc_ctl &= ~DPFC_CTL_EN; I915_WRITE(DPFC_CONTROL, dpfc_ctl); - intel_wait_for_vblank(dev); DRM_DEBUG_KMS("disabled FBC\n"); } @@ -1179,7 +1229,6 @@ void ironlake_disable_fbc(struct drm_device *dev) dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); dpfc_ctl &= ~DPFC_CTL_EN; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - intel_wait_for_vblank(dev); DRM_DEBUG_KMS("disabled FBC\n"); } @@ -1453,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, dspcntr &= ~DISPPLANE_TILED; } - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -1462,23 +1511,22 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, Start = obj_priv->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); - DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + Start, Offset, x, y, fb->pitch); I915_WRITE(dspstride, fb->pitch); if (IS_I965G(dev)) { - I915_WRITE(dspbase, Offset); - I915_READ(dspbase); I915_WRITE(dspsurf, Start); - I915_READ(dspsurf); I915_WRITE(dsptileoff, (y << 16) | x); + I915_WRITE(dspbase, Offset); } else { I915_WRITE(dspbase, Start + Offset); - I915_READ(dspbase); } + POSTING_READ(dspbase); - if ((IS_I965G(dev) || plane == 0)) + if (IS_I965G(dev) || plane == 0) intel_update_fbc(crtc, &crtc->mode); - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, intel_crtc->pipe); intel_increase_pllclock(crtc, true); return 0; @@ -1489,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; @@ -1497,13 +1544,6 @@ 
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_gem_object *obj; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - unsigned long Start, Offset; - int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); - int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); - int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; - int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - u32 dspcntr; int ret; /* no fb bound */ @@ -1539,73 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - dspcntr = I915_READ(dspcntr_reg); - /* Mask out pixel format bits in case we change it */ - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (crtc->fb->bits_per_pixel) { - case 8: - dspcntr |= DISPPLANE_8BPP; - break; - case 16: - if (crtc->fb->depth == 15) - dspcntr |= DISPPLANE_15_16BPP; - else - dspcntr |= DISPPLANE_16BPP; - break; - case 24: - case 32: - if (crtc->fb->depth == 30) - dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; - else - dspcntr |= DISPPLANE_32BPP_NO_ALPHA; - break; - default: - DRM_ERROR("Unknown color depth\n"); + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); + if (ret) { i915_gem_object_unpin(obj); mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - if (IS_I965G(dev)) { - if (obj_priv->tiling_mode != I915_TILING_NONE) - dspcntr |= DISPPLANE_TILED; - else - dspcntr &= ~DISPPLANE_TILED; - } - - if (HAS_PCH_SPLIT(dev)) - /* must disable */ - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - - I915_WRITE(dspcntr_reg, dspcntr); - - Start = obj_priv->gtt_offset; - Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); - - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - Start, Offset, x, y, crtc->fb->pitch); - I915_WRITE(dspstride, crtc->fb->pitch); - if (IS_I965G(dev)) { - I915_WRITE(dspbase, Offset); - I915_READ(dspbase); - I915_WRITE(dspsurf, Start); - I915_READ(dspsurf); - I915_WRITE(dsptileoff, (y << 16) | x); - } else { - I915_WRITE(dspbase, Start + Offset); - I915_READ(dspbase); + return ret; } - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); - - intel_wait_for_vblank(dev); - if (old_fb) { intel_fb = to_intel_framebuffer(old_fb); obj_priv = to_intel_bo(intel_fb->obj); i915_gem_object_unpin(intel_fb->obj); } - intel_increase_pllclock(crtc, true); mutex_unlock(&dev->struct_mutex); @@ -1627,54 +1612,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -/* Disable the VGA plane that we never use */ -static void i915_disable_vga (struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u8 sr1; - u32 vga_reg; - - if (HAS_PCH_SPLIT(dev)) - vga_reg = CPU_VGACNTRL; - else - vga_reg = VGACNTRL; - - if (I915_READ(vga_reg) & VGA_DISP_DISABLE) - return; - - I915_WRITE8(VGA_SR_INDEX, 1); - sr1 = I915_READ8(VGA_SR_DATA); - I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); - udelay(100); - - I915_WRITE(vga_reg, VGA_DISP_DISABLE); -} - -static void ironlake_disable_pll_edp (struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpa_ctl; - - DRM_DEBUG_KMS("\n"); - dpa_ctl = I915_READ(DP_A); - dpa_ctl &= ~DP_PLL_ENABLE; - I915_WRITE(DP_A, dpa_ctl); -} - -static void ironlake_enable_pll_edp (struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpa_ctl; - - dpa_ctl = I915_READ(DP_A); - dpa_ctl |= DP_PLL_ENABLE; - I915_WRITE(DP_A, dpa_ctl); - udelay(200); -} - - static void 
ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) { struct drm_device *dev = crtc->dev; @@ -1928,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; - int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; - int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; - int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS; int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; @@ -1945,7 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; int trans_dpll_sel = (pipe == 0) ? 0 : 1; u32 temp; - int n; u32 pipe_bpc; temp = I915_READ(pipeconf_reg); @@ -1958,7 +1891,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: - DRM_DEBUG_KMS("crtc %d dpms on\n", pipe); + DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); @@ -1968,10 +1901,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } } - if (HAS_eDP) { - /* enable eDP PLL */ - ironlake_enable_pll_edp(crtc); - } else { + if (!HAS_eDP) { /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ temp = I915_READ(fdi_rx_reg); @@ -2003,17 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Enable panel fitting for LVDS */ - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || HAS_eDP || intel_pch_has_edp(crtc)) { - temp = I915_READ(pf_ctl_reg); - I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); - - /* currently full aspect */ - I915_WRITE(pf_win_pos, 0); - - I915_WRITE(pf_win_size, - (dev_priv->panel_fixed_mode->hdisplay << 16) | - (dev_priv->panel_fixed_mode->vdisplay)); + if (dev_priv->pch_pf_size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) + || HAS_eDP || intel_pch_has_edp(crtc))) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, + PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, + dev_priv->pch_pf_pos); + I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, + dev_priv->pch_pf_size); } /* Enable CPU pipe */ @@ -2097,9 +2029,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int reg; reg = I915_READ(trans_dp_ctl); - reg &= ~TRANS_DP_PORT_SEL_MASK; - reg = TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING; + reg &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK); + reg |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; @@ -2137,18 +2070,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(transconf_reg, temp | TRANS_ENABLE); I915_READ(transconf_reg); - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) - ; - + if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) + DRM_ERROR("failed to enable transcoder\n"); } intel_crtc_load_lut(crtc); intel_update_fbc(crtc, &crtc->mode); + break; - break; case DRM_MODE_DPMS_OFF: - DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); + DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); drm_vblank_off(dev, pipe); /* Disable display plane */ @@ -2164,40 +2096,22 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - i915_disable_vga(dev); - /* disable cpu pipe, disable after all planes disabled */ temp = I915_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) != 0) { I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); - n = 0; + /* wait for cpu pipe off, pipe state */ - while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { - n++; - if (n < 60) { - udelay(500); - continue; - } else { - DRM_DEBUG_KMS("pipe %d off delay\n", - pipe); - break; - } - } + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) + DRM_ERROR("failed to turn off cpu pipe\n"); } else DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); udelay(100); /* Disable PF */ - temp = I915_READ(pf_ctl_reg); - if ((temp & PF_ENABLE) != 0) { - I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); - I915_READ(pf_ctl_reg); - } - I915_WRITE(pf_win_size, 0); - POSTING_READ(pf_win_size); - + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); + I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); /* disable CPU FDI tx and PCH FDI rx */ temp = I915_READ(fdi_tx_reg); @@ -2244,20 +2158,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) temp = I915_READ(transconf_reg); if ((temp & TRANS_ENABLE) != 0) { I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); - I915_READ(transconf_reg); - n = 0; + /* wait for PCH transcoder off, transcoder state */ - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { - n++; - if (n < 60) { - udelay(500); - continue; - } else { - DRM_DEBUG_KMS("transcoder %d off " - "delay\n", pipe); - break; - } - } + if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) + DRM_ERROR("failed to disable transcoder\n"); } temp = I915_READ(transconf_reg); @@ -2294,10 +2198,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); I915_READ(pch_dpll_reg); - if (HAS_eDP) { - ironlake_disable_pll_edp(crtc); - } - /* Switch from PCDclk to Rawclk */ temp = I915_READ(fdi_rx_reg); temp &= ~FDI_SEL_PCDCLK; @@ -2372,8 +2272,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: - intel_update_watermarks(dev); - /* Enable the DPLL */ temp = I915_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) == 0) { @@ -2413,8 +2311,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) intel_crtc_dpms_overlay(intel_crtc, true); break; case DRM_MODE_DPMS_OFF: - intel_update_watermarks(dev); - /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, false); drm_vblank_off(dev, pipe); @@ -2423,9 +2319,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - /* Disable the VGA plane that we never use */ - i915_disable_vga(dev); - /* Disable display plane */ temp = I915_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { @@ -2435,10 +2328,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(dspbase_reg); } - if (!IS_I9XX(dev)) { - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank(dev); - } + /* Wait for vblank for the disable to take effect */ + intel_wait_for_vblank_off(dev, pipe); /* Don't disable pipe A or pipe A PLLs if needed */ if (pipeconf_reg == PIPEACONF && @@ -2453,7 +2344,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank(dev); + intel_wait_for_vblank_off(dev, pipe); temp = I915_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) != 0) { @@ -2469,9 +2360,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) /** * Sets the power management mode of the pipe and plane. - * - * This code should probably grow support for turning the cursor off and back - * on appropriately at the same time as we're turning the pipe off/on. */ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) { @@ -2482,9 +2370,29 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) int pipe = intel_crtc->pipe; bool enabled; - dev_priv->display.dpms(crtc, mode); + if (intel_crtc->dpms_mode == mode) + return; intel_crtc->dpms_mode = mode; + intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; + + /* When switching on the display, ensure that SR is disabled + * with multiple pipes prior to enabling to new pipe. 
+ * + * When switching off the display, make sure the cursor is + * properly hidden prior to disabling the pipe. + */ + if (mode == DRM_MODE_DPMS_ON) + intel_update_watermarks(dev); + else + intel_crtc_update_cursor(crtc); + + dev_priv->display.dpms(crtc, mode); + + if (mode == DRM_MODE_DPMS_ON) + intel_crtc_update_cursor(crtc); + else + intel_update_watermarks(dev); if (!dev->primary->master) return; @@ -2536,6 +2444,20 @@ void intel_encoder_commit (struct drm_encoder *encoder) encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } +void intel_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); + + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); + + drm_encoder_cleanup(encoder); + kfree(intel_encoder); +} + static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -2867,7 +2789,7 @@ struct cxsr_latency { unsigned long cursor_hpll_disable; }; -static struct cxsr_latency cxsr_latency_table[] = { +static const struct cxsr_latency cxsr_latency_table[] = { {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ @@ -2905,11 +2827,13 @@ static struct cxsr_latency cxsr_latency_table[] = { {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ }; -static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, - int fsb, int mem) +static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, + int is_ddr3, + int fsb, + int mem) { + const struct cxsr_latency *latency; int i; - struct cxsr_latency *latency; if (fsb == 0 || mem == 0) return NULL; @@ -2930,13 +2854,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, static void pineview_disable_cxsr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; /* deactivate cxsr */ - reg = I915_READ(DSPFW3); - reg &= ~(PINEVIEW_SELF_REFRESH_EN); - I915_WRITE(DSPFW3, reg); - DRM_INFO("Big FIFO is disabled\n"); + I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); } /* @@ -3024,12 +2944,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; + const struct cxsr_latency *latency; u32 reg; unsigned long wm; - struct cxsr_latency *latency; int sr_clock; - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); @@ -3075,9 +2995,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); /* activate cxsr */ - reg = I915_READ(DSPFW3); - reg |= PINEVIEW_SELF_REFRESH_EN; - I915_WRITE(DSPFW3, reg); + I915_WRITE(DSPFW3, + I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); DRM_DEBUG_KMS("Self-refresh is enabled\n"); } else { pineview_disable_cxsr(dev); @@ -3354,12 +3273,11 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, int line_count; int planea_htotal = 0, planeb_htotal = 0; struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; /* Need htotal for all active display plane */ list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) { - intel_crtc = to_intel_crtc(crtc); - if (crtc->enabled) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { if (intel_crtc->plane == 0) planea_htotal = crtc->mode.htotal; else @@ -3519,7 +3437,6 @@ static void intel_update_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; int sr_hdisplay = 0; unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; int enabled = 0, pixel_size = 0; @@ -3530,8 +3447,8 @@ static void intel_update_watermarks(struct drm_device *dev) /* Get the clock config from both planes */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - intel_crtc = to_intel_crtc(crtc); - if (crtc->enabled) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { enabled++; if (intel_crtc->plane == 0) { DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", @@ -3589,10 +3506,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - bool is_edp = false; + struct intel_encoder *has_edp_encoder = NULL; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; - struct intel_encoder *intel_encoder = NULL; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; @@ -3613,12 +3529,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, drm_vblank_pre_modeset(dev, pipe); list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_encoder *intel_encoder; - if (!encoder || encoder->crtc != crtc) + if (encoder->crtc != crtc) continue; intel_encoder = enc_to_intel_encoder(encoder); - switch (intel_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -3642,7 +3558,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, is_dp = true; break; case INTEL_OUTPUT_EDP: - is_edp = true; + has_edp_encoder = intel_encoder; break; } @@ -3720,10 +3636,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int lane = 0, link_bw, bpp; /* eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (is_edp) { + if (has_edp_encoder) { target_clock = mode->clock; - intel_edp_link_config(intel_encoder, - &lane, &link_bw); + intel_edp_link_config(has_edp_encoder, + &lane, &link_bw); } else { /* DP over FDI requires target mode clock instead of link clock */ @@ -3744,7 +3660,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { + } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { switch (dev_priv->edp_bpp/3) { case 8: temp |= PIPE_8BPC; @@ -3817,7 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(200); - if (is_edp) { + if (has_edp_encoder) { if (dev_priv->lvds_use_ssc) { temp |= DREF_SSC1_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); @@ -3966,9 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll_reg = pch_dpll_reg; } - if (is_edp) { - ironlake_disable_pll_edp(crtc); - } else if ((dpll & DPLL_VCO_ENABLE)) { + if (!has_edp_encoder) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); I915_READ(dpll_reg); @@ -4063,7 +3977,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (!is_edp) { + if 
(!has_edp_encoder) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); I915_READ(dpll_reg); @@ -4142,7 +4056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(link_m1_reg, m_n.link_m); I915_WRITE(link_n1_reg, m_n.link_n); - if (is_edp) { + if (has_edp_encoder) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); } else { /* enable FDI RX PLL too */ @@ -4167,7 +4081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(pipeconf_reg, pipeconf); I915_READ(pipeconf_reg); - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, pipe); if (IS_IRONLAKE(dev)) { /* enable address swizzle for tiling buffer */ @@ -4180,9 +4094,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); - intel_update_watermarks(dev); drm_vblank_post_modeset(dev, pipe); @@ -4216,6 +4127,62 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) } } +static void i845_update_cursor(struct drm_crtc *crtc, u32 base) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + bool visible = base != 0; + u32 cntl; + + if (intel_crtc->cursor_visible == visible) + return; + + cntl = I915_READ(CURACNTR); + if (visible) { + /* On these chipsets we can only modify the base whilst + * the cursor is disabled. + */ + I915_WRITE(CURABASE, base); + + cntl &= ~(CURSOR_FORMAT_MASK); + /* XXX width must be 64, stride 256 => 0x00 << 28 */ + cntl |= CURSOR_ENABLE | + CURSOR_GAMMA_ENABLE | + CURSOR_FORMAT_ARGB; + } else + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + I915_WRITE(CURACNTR, cntl); + + intel_crtc->cursor_visible = visible; +} + +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + bool visible = base != 0; + + if (intel_crtc->cursor_visible != visible) { + uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); + if (base) { + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= pipe << 28; /* Connect to correct pipe */ + } else { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } + I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); + + intel_crtc->cursor_visible = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); +} + /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ static void intel_crtc_update_cursor(struct drm_crtc *crtc) { @@ -4225,12 +4192,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int x = intel_crtc->cursor_x; int y = intel_crtc->cursor_y; - uint32_t base, pos; + u32 base, pos; bool visible; pos = 0; - if (crtc->fb) { + if (intel_crtc->cursor_on && crtc->fb) { base = intel_crtc->cursor_addr; if (x > (int) crtc->fb->width) base = 0; @@ -4259,37 +4226,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) pos |= y << CURSOR_Y_SHIFT; visible = base != 0; - if (!visible && !intel_crtc->cursor_visble) + if (!visible && !intel_crtc->cursor_visible) return; I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); - if (intel_crtc->cursor_visble != visible) { - uint32_t cntl = I915_READ(pipe == 0 ? 
CURACNTR : CURBCNTR); - if (base) { - /* Hooray for CUR*CNTR differences */ - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); - cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - cntl |= pipe << 28; /* Connect to correct pipe */ - } else { - cntl &= ~(CURSOR_FORMAT_MASK); - cntl |= CURSOR_ENABLE; - cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; - } - } else { - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); - cntl |= CURSOR_MODE_DISABLE; - } else { - cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); - } - } - I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); - - intel_crtc->cursor_visble = visible; - } - /* and commit changes on next vblank */ - I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); + if (IS_845G(dev) || IS_I865G(dev)) + i845_update_cursor(crtc, base); + else + i9xx_update_cursor(crtc, base); if (visible) intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); @@ -4354,8 +4298,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->gtt_offset; } else { + int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, bo, - (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); + (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, + align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; @@ -4544,7 +4490,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, encoder_funcs->commit(encoder); } /* let the connector get through one full cycle before testing */ - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, intel_crtc->pipe); return crtc; } @@ -4749,7 +4695,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) dpll &= ~DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); dpll = I915_READ(dpll_reg); - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, pipe); dpll = I915_READ(dpll_reg); if (dpll & DISPLAY_RATE_SELECT_FPA1) DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); @@ -4793,7 +4739,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) dpll |= DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); dpll = I915_READ(dpll_reg); - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, pipe); dpll = I915_READ(dpll_reg); if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); @@ -4916,15 +4862,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) kfree(intel_crtc); } -struct intel_unpin_work { - struct work_struct work; - struct drm_device *dev; - struct drm_gem_object *old_fb_obj; - struct drm_gem_object *pending_flip_obj; - struct drm_pending_vblank_event *event; - int pending; -}; - static void intel_unpin_work_fn(struct work_struct *__work) { struct intel_unpin_work *work = @@ -5012,7 +4949,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { - intel_crtc->unpin_work->pending = 1; + if ((++intel_crtc->unpin_work->pending) > 1) + DRM_ERROR("Prepared flip multiple times\n"); } else { DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); } @@ -5031,9 +4969,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags, offset; - int pipesrc_reg = (intel_crtc->pipe == 0) ? 
PIPEASRC : PIPEBSRC; - int ret, pipesrc; - u32 flip_mask; + int pipe = intel_crtc->pipe; + u32 pf, pipesrc; + int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) @@ -5082,34 +5020,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; - if (intel_crtc->plane) - flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; - else - flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; + if (IS_GEN3(dev) || IS_GEN2(dev)) { + u32 flip_mask; - /* Wait for any previous flip to finish */ - if (IS_GEN3(dev)) - while (I915_READ(ISR) & flip_mask) - ; + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + BEGIN_LP_RING(2); + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + work->enable_stall_check = true; /* Offset into the new buffer for cases of shared fbs between CRTCs */ - offset = obj_priv->gtt_offset; - offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8); + offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; BEGIN_LP_RING(4); - if (IS_I965G(dev)) { + switch(INTEL_INFO(dev)->gen) { + case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(offset | obj_priv->tiling_mode); - pipesrc = I915_READ(pipesrc_reg); - OUT_RING(pipesrc & 0x0fff0fff); - } else { + OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(MI_NOOP); + break; + + case 3: OUT_RING(MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(offset); + OUT_RING(obj_priv->gtt_offset + offset); OUT_RING(MI_NOOP); + break; + + case 4: + case 5: + /* i965+ uses the linear or tiled offsets from the + * Display Registers (which do not change across a page-flip) + * so we need only reprogram the base address. + */ + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitch); + OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); + + /* XXX Enabling the panel-fitter across page-flip is so far + * untested on non-native modes, so ignore it for now. + * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; + */ + pf = 0; + pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; + OUT_RING(pf | pipesrc); + break; + + case 6: + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitch | obj_priv->tiling_mode); + OUT_RING(obj_priv->gtt_offset); + + pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; + pipesrc = I915_READ(pipe == 0 ? 
PIPEASRC : PIPEBSRC) & 0x0fff0fff; + OUT_RING(pf | pipesrc); + break; } ADVANCE_LP_RING(); @@ -5190,7 +5167,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; intel_crtc->cursor_addr = 0; - intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; + intel_crtc->dpms_mode = -1; drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_crtc->busy = false; @@ -5432,37 +5409,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { }; static struct drm_gem_object * -intel_alloc_power_context(struct drm_device *dev) +intel_alloc_context_page(struct drm_device *dev) { - struct drm_gem_object *pwrctx; + struct drm_gem_object *ctx; int ret; - pwrctx = i915_gem_alloc_object(dev, 4096); - if (!pwrctx) { + ctx = i915_gem_alloc_object(dev, 4096); + if (!ctx) { DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); return NULL; } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(pwrctx, 4096); + ret = i915_gem_object_pin(ctx, 4096); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; } - ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); + ret = i915_gem_object_set_to_gtt_domain(ctx, 1); if (ret) { DRM_ERROR("failed to set-domain on power context: %d\n", ret); goto err_unpin; } mutex_unlock(&dev->struct_mutex); - return pwrctx; + return ctx; err_unpin: - i915_gem_object_unpin(pwrctx); + i915_gem_object_unpin(ctx); err_unref: - drm_gem_object_unreference(pwrctx); + drm_gem_object_unreference(ctx); mutex_unlock(&dev->struct_mutex); return NULL; } @@ -5494,7 +5471,6 @@ void ironlake_enable_drps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; - int i = 0; /* 100ms RC evaluation intervals */ I915_WRITE(RCUPEI, 100000); @@ -5538,13 +5514,8 @@ void ironlake_enable_drps(struct drm_device *dev) rgvmodectl |= MEMMODE_SWMODE_EN; I915_WRITE(MEMMODECTL, rgvmodectl); - while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { - if (i++ > 100) { - DRM_ERROR("stuck trying to change perf mode\n"); - break; - } - msleep(1); - } + if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) + DRM_ERROR("stuck trying to change perf mode\n"); msleep(1); ironlake_set_drps(dev, fstart); @@ -5725,7 +5696,8 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_DPFC_DIS2 | ILK_CLK_FBC); } - return; + if (IS_GEN6(dev)) + return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -5768,6 +5740,31 @@ void intel_init_clock_gating(struct drm_device *dev) * GPU can automatically power down the render unit if given a page * to save state. */ + if (IS_IRONLAKE_M(dev)) { + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (dev_priv->renderctx) { + struct drm_i915_gem_object *obj_priv; + obj_priv = to_intel_bo(dev_priv->renderctx); + if (obj_priv) { + BEGIN_LP_RING(4); + OUT_RING(MI_SET_CONTEXT); + OUT_RING(obj_priv->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); + } + } else { + DRM_DEBUG_KMS("Failed to allocate render context." 
+ "Disable RC6\n"); + return; + } + } + if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_i915_gem_object *obj_priv = NULL; @@ -5776,7 +5773,7 @@ void intel_init_clock_gating(struct drm_device *dev) } else { struct drm_gem_object *pwrctx; - pwrctx = intel_alloc_power_context(dev); + pwrctx = intel_alloc_context_page(dev); if (pwrctx) { dev_priv->pwrctx = pwrctx; obj_priv = to_intel_bo(pwrctx); @@ -5948,6 +5945,29 @@ static void intel_init_quirks(struct drm_device *dev) } } +/* Disable the VGA plane that we never use */ +static void i915_disable_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 sr1; + u32 vga_reg; + + if (HAS_PCH_SPLIT(dev)) + vga_reg = CPU_VGACNTRL; + else + vga_reg = VGACNTRL; + + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(1, VGA_SR_INDEX); + sr1 = inb(VGA_SR_DATA); + outb(sr1 | 1<<5, VGA_SR_DATA); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + udelay(300); + + I915_WRITE(vga_reg, VGA_DISP_DISABLE); + POSTING_READ(vga_reg); +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5996,6 +6016,9 @@ void intel_modeset_init(struct drm_device *dev) intel_init_clock_gating(dev); + /* Just disable it once at startup */ + i915_disable_vga(dev); + if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); @@ -6034,6 +6057,16 @@ void intel_modeset_cleanup(struct drm_device *dev) if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); + if (dev_priv->renderctx) { + struct drm_i915_gem_object *obj_priv; + + obj_priv = to_intel_bo(dev_priv->renderctx); + I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); + I915_READ(CCID); + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(dev_priv->renderctx); + } + if (dev_priv->pwrctx) { struct drm_i915_gem_object *obj_priv; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 40be1fa65be..51d142939a2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -42,10 +42,11 @@ #define DP_LINK_CONFIGURATION_SIZE 9 -#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) -#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) +#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) +#define IS_PCH_eDP(i) ((i)->is_pch_edp) -struct intel_dp_priv { +struct intel_dp { + struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; @@ -54,40 +55,39 @@ struct intel_dp_priv { uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[4]; - struct intel_encoder *intel_encoder; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; bool is_pch_edp; }; -static void -intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); +static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) +{ + return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); +} -static void -intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); +static void intel_dp_link_train(struct intel_dp *intel_dp); +static void intel_dp_link_down(struct intel_dp *intel_dp); void intel_edp_link_config (struct intel_encoder *intel_encoder, - int *lane_num, int *link_bw) + int *lane_num, int *link_bw) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); - *lane_num = dp_priv->lane_count; - if 
(dp_priv->link_bw == DP_LINK_BW_1_62) + *lane_num = intel_dp->lane_count; + if (intel_dp->link_bw == DP_LINK_BW_1_62) *link_bw = 162000; - else if (dp_priv->link_bw == DP_LINK_BW_2_7) + else if (intel_dp->link_bw == DP_LINK_BW_2_7) *link_bw = 270000; } static int -intel_dp_max_lane_count(struct intel_encoder *intel_encoder) +intel_dp_max_lane_count(struct intel_dp *intel_dp) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int max_lane_count = 4; - if (dp_priv->dpcd[0] >= 0x11) { - max_lane_count = dp_priv->dpcd[2] & 0x1f; + if (intel_dp->dpcd[0] >= 0x11) { + max_lane_count = intel_dp->dpcd[2] & 0x1f; switch (max_lane_count) { case 1: case 2: case 4: break; @@ -99,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder) } static int -intel_dp_max_link_bw(struct intel_encoder *intel_encoder) +intel_dp_max_link_bw(struct intel_dp *intel_dp) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - int max_link_bw = dp_priv->dpcd[1]; + int max_link_bw = intel_dp->dpcd[1]; switch (max_link_bw) { case DP_LINK_BW_1_62: @@ -126,13 +125,11 @@ intel_dp_link_clock(uint8_t link_bw) /* I think this is a fiction */ static int -intel_dp_link_required(struct drm_device *dev, - struct intel_encoder *intel_encoder, int pixel_clock) +intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) return (pixel_clock * dev_priv->edp_bpp) / 8; else return pixel_clock * 3; @@ -149,14 +146,13 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); - int max_lanes = intel_dp_max_lane_count(intel_encoder); + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); + int max_lanes = intel_dp_max_lane_count(intel_dp); - if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; @@ -167,8 +163,8 @@ intel_dp_mode_valid(struct drm_connector *connector, /* only refuse the mode on non eDP since we have seen some wierd eDP panels which are outside spec tolerances but somehow work by magic */ - if (!IS_eDP(intel_encoder) && - (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) + if (!IS_eDP(intel_dp) && + (intel_dp_link_required(connector->dev, intel_dp, mode->clock) > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; @@ -232,19 +228,17 @@ intel_hrawclk(struct drm_device *dev) } static int -intel_dp_aux_ch(struct intel_encoder *intel_encoder, +intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *send, int send_bytes, uint8_t *recv, int recv_size) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - uint32_t output_reg = dp_priv->output_reg; - struct drm_device *dev = intel_encoder->enc.dev; + uint32_t output_reg = intel_dp->output_reg; + struct drm_device *dev = 
intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; int i; int recv_bytes; - uint32_t ctl; uint32_t status; uint32_t aux_clock_divider; int try, precharge; @@ -253,7 +247,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that */ - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else @@ -268,41 +262,43 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, else precharge = 5; + if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { + DRM_ERROR("dp_aux_ch not started status 0x%08x\n", + I915_READ(ch_ctl)); + return -EBUSY; + } + /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ - for (i = 0; i < send_bytes; i += 4) { - uint32_t d = pack_aux(send + i, send_bytes - i); - - I915_WRITE(ch_data + i, d); - } - - ctl = (DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); + for (i = 0; i < send_bytes; i += 4) + I915_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, ctl); - (void) I915_READ(ch_ctl); + I915_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); for (;;) { - udelay(100); status = I915_READ(ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; + udelay(100); } /* Clear done status and any errors */ - I915_WRITE(ch_ctl, (status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR)); - (void) I915_READ(ch_ctl); - if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) + I915_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + if (status & DP_AUX_CH_CTL_DONE) break; } @@ -329,22 +325,19 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, /* Unload any bytes sent back from the other side */ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - if (recv_bytes > recv_size) recv_bytes = recv_size; - for (i = 0; i < recv_bytes; i += 4) { - uint32_t d = I915_READ(ch_data + i); - - unpack_aux(d, recv + i, recv_bytes - i); - } + for (i = 0; i < recv_bytes; i += 4) + unpack_aux(I915_READ(ch_data + i), + recv + i, recv_bytes - i); return recv_bytes; } /* Write data to the aux channel in native mode */ static int -intel_dp_aux_native_write(struct intel_encoder *intel_encoder, +intel_dp_aux_native_write(struct intel_dp *intel_dp, uint16_t address, uint8_t *send, int send_bytes) { int ret; @@ -361,7 +354,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder, memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; for (;;) { - ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); + ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; if 
((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) @@ -376,15 +369,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder, /* Write a single byte to the aux channel in native mode */ static int -intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, +intel_dp_aux_native_write_1(struct intel_dp *intel_dp, uint16_t address, uint8_t byte) { - return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); + return intel_dp_aux_native_write(intel_dp, address, &byte, 1); } /* read bytes from a native aux channel */ static int -intel_dp_aux_native_read(struct intel_encoder *intel_encoder, +intel_dp_aux_native_read(struct intel_dp *intel_dp, uint16_t address, uint8_t *recv, int recv_bytes) { uint8_t msg[4]; @@ -403,7 +396,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder, reply_bytes = recv_bytes + 1; for (;;) { - ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, + ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) return -EPROTO; @@ -426,10 +419,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint8_t write_byte, uint8_t *read_byte) { struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - struct intel_dp_priv *dp_priv = container_of(adapter, - struct intel_dp_priv, - adapter); - struct intel_encoder *intel_encoder = dp_priv->intel_encoder; + struct intel_dp *intel_dp = container_of(adapter, + struct intel_dp, + adapter); uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; @@ -468,7 +460,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } for (;;) { - ret = intel_dp_aux_ch(intel_encoder, + ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret < 0) { @@ -496,57 +488,42 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } static int -intel_dp_i2c_init(struct intel_encoder *intel_encoder, +intel_dp_i2c_init(struct intel_dp *intel_dp, struct intel_connector *intel_connector, const char *name) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - DRM_DEBUG_KMS("i2c_init %s\n", name); - dp_priv->algo.running = false; - dp_priv->algo.address = 0; - dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; - - memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); - dp_priv->adapter.owner = THIS_MODULE; - dp_priv->adapter.class = I2C_CLASS_DDC; - strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); - dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; - dp_priv->adapter.algo_data = &dp_priv->algo; - dp_priv->adapter.dev.parent = &intel_connector->base.kdev; - - return i2c_dp_aux_add_bus(&dp_priv->adapter); + intel_dp->algo.running = false; + intel_dp->algo.address = 0; + intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; + + memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); + intel_dp->adapter.owner = THIS_MODULE; + intel_dp->adapter.class = I2C_CLASS_DDC; + strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); + intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; + intel_dp->adapter.algo_data = &intel_dp->algo; + intel_dp->adapter.dev.parent = &intel_connector->base.kdev; + + return i2c_dp_aux_add_bus(&intel_dp->adapter); } static bool intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = 
dev->dev_private; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int lane_count, clock; - int max_lane_count = intel_dp_max_lane_count(intel_encoder); - int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; + int max_lane_count = intel_dp_max_lane_count(intel_dp); + int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && dev_priv->panel_fixed_mode) { - struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; - - adjusted_mode->hdisplay = fixed_mode->hdisplay; - adjusted_mode->hsync_start = fixed_mode->hsync_start; - adjusted_mode->hsync_end = fixed_mode->hsync_end; - adjusted_mode->htotal = fixed_mode->htotal; - - adjusted_mode->vdisplay = fixed_mode->vdisplay; - adjusted_mode->vsync_start = fixed_mode->vsync_start; - adjusted_mode->vsync_end = fixed_mode->vsync_end; - adjusted_mode->vtotal = fixed_mode->vtotal; - - adjusted_mode->clock = fixed_mode->clock; - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - + intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); + intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, + mode, adjusted_mode); /* * the mode->clock is used to calculate the Data&Link M/N * of the pipe. For the eDP the fixed clock should be used. @@ -558,31 +535,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); - if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) + if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock) <= link_avail) { - dp_priv->link_bw = bws[clock]; - dp_priv->lane_count = lane_count; - adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); + intel_dp->link_bw = bws[clock]; + intel_dp->lane_count = lane_count; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); DRM_DEBUG_KMS("Display port link bw %02x lane " "count %d clock %d\n", - dp_priv->link_bw, dp_priv->lane_count, + intel_dp->link_bw, intel_dp->lane_count, adjusted_mode->clock); return true; } } } - if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { /* okay we failed just pick the highest */ - dp_priv->lane_count = max_lane_count; - dp_priv->link_bw = bws[max_clock]; - adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); + intel_dp->lane_count = max_lane_count; + intel_dp->link_bw = bws[max_clock]; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); DRM_DEBUG_KMS("Force picking display port link bw %02x lane " "count %d clock %d\n", - dp_priv->link_bw, dp_priv->lane_count, + intel_dp->link_bw, intel_dp->lane_count, adjusted_mode->clock); + return true; } + return false; } @@ -626,17 +605,14 @@ bool intel_pch_has_edp(struct drm_crtc *crtc) struct drm_encoder *encoder; list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder; - struct intel_dp_priv *dp_priv; + struct intel_dp *intel_dp; - if (!encoder || encoder->crtc != crtc) + if (encoder->crtc != crtc) continue; - intel_encoder = enc_to_intel_encoder(encoder); - dp_priv = intel_encoder->dev_priv; - - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) - return dp_priv->is_pch_edp; + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->base.type == 
INTEL_OUTPUT_DISPLAYPORT) + return intel_dp->is_pch_edp; } return false; } @@ -657,18 +633,15 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, * Find the lane count in the intel_encoder private */ list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder; - struct intel_dp_priv *dp_priv; + struct intel_dp *intel_dp; if (encoder->crtc != crtc) continue; - intel_encoder = enc_to_intel_encoder(encoder); - dp_priv = intel_encoder->dev_priv; - - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { - lane_count = dp_priv->lane_count; - if (IS_PCH_eDP(dp_priv)) + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = intel_dp->lane_count; + if (IS_PCH_eDP(intel_dp)) bpp = dev_priv->edp_bpp; break; } @@ -724,107 +697,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - struct drm_crtc *crtc = intel_encoder->enc.crtc; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_crtc *crtc = intel_dp->base.enc.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - dp_priv->DP = (DP_VOLTAGE_0_4 | + intel_dp->DP = (DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0); if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - dp_priv->DP |= DP_SYNC_HS_HIGH; + intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - dp_priv->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_SYNC_VS_HIGH; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) - dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; else - dp_priv->DP |= DP_LINK_TRAIN_OFF; + intel_dp->DP |= DP_LINK_TRAIN_OFF; - switch (dp_priv->lane_count) { + switch (intel_dp->lane_count) { case 1: - dp_priv->DP |= DP_PORT_WIDTH_1; + intel_dp->DP |= DP_PORT_WIDTH_1; break; case 2: - dp_priv->DP |= DP_PORT_WIDTH_2; + intel_dp->DP |= DP_PORT_WIDTH_2; break; case 4: - dp_priv->DP |= DP_PORT_WIDTH_4; + intel_dp->DP |= DP_PORT_WIDTH_4; break; } - if (dp_priv->has_audio) - dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; + if (intel_dp->has_audio) + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); - dp_priv->link_configuration[0] = dp_priv->link_bw; - dp_priv->link_configuration[1] = dp_priv->lane_count; + memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + intel_dp->link_configuration[0] = intel_dp->link_bw; + intel_dp->link_configuration[1] = intel_dp->lane_count; /* * Check for DPCD version > 1.1 and enhanced framing support */ - if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { - dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - dp_priv->DP |= DP_ENHANCED_FRAMING; + if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { + intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + intel_dp->DP |= DP_ENHANCED_FRAMING; } /* CPT DP's pipe select is decided in TRANS_DP_CTL */ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) - dp_priv->DP |= DP_PIPEB_SELECT; + intel_dp->DP |= DP_PIPEB_SELECT; - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_dp)) { /* don't miss out required setting for eDP */ - dp_priv->DP |= DP_PLL_ENABLE; + intel_dp->DP |= DP_PLL_ENABLE; if 
(adjusted_mode->clock < 200000) - dp_priv->DP |= DP_PLL_FREQ_160MHZ; + intel_dp->DP |= DP_PLL_FREQ_160MHZ; else - dp_priv->DP |= DP_PLL_FREQ_270MHZ; + intel_dp->DP |= DP_PLL_FREQ_270MHZ; } } static void ironlake_edp_panel_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long timeout = jiffies + msecs_to_jiffies(5000); - u32 pp, pp_status; + u32 pp; - pp_status = I915_READ(PCH_PP_STATUS); - if (pp_status & PP_ON) + if (I915_READ(PCH_PP_STATUS) & PP_ON) return; pp = I915_READ(PCH_PP_CONTROL); + + /* ILK workaround: disable reset around power sequence */ + pp &= ~PANEL_POWER_RESET; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); + pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); - do { - pp_status = I915_READ(PCH_PP_STATUS); - } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); - if (time_after(jiffies, timeout)) - DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); + if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) + DRM_ERROR("panel on wait timed out: 0x%08x\n", + I915_READ(PCH_PP_STATUS)); pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); + pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); } static void ironlake_edp_panel_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long timeout = jiffies + msecs_to_jiffies(5000); - u32 pp, pp_status; + u32 pp; pp = I915_READ(PCH_PP_CONTROL); + + /* ILK workaround: disable reset around power sequence */ + pp &= ~PANEL_POWER_RESET; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); + pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); - do { - pp_status = I915_READ(PCH_PP_STATUS); - } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); - if (time_after(jiffies, timeout)) - DRM_DEBUG_KMS("panel off wait timed out\n"); + if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) + DRM_ERROR("panel off wait timed out: 0x%08x\n", + I915_READ(PCH_PP_STATUS)); /* Make sure VDD is enabled so DP AUX will work */ - pp |= EDP_FORCE_VDD; + pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); } static void ironlake_edp_backlight_on (struct drm_device *dev) @@ -849,33 +829,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) I915_WRITE(PCH_PP_CONTROL, pp); } +static void ironlake_edp_pll_on(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG_KMS("\n"); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); +} + +static void ironlake_edp_pll_off(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + dpa_ctl = I915_READ(DP_A); + dpa_ctl |= DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); + udelay(200); +} + +static void intel_dp_prepare(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(intel_dp->output_reg); + + if (IS_eDP(intel_dp)) { + ironlake_edp_backlight_off(dev); + ironlake_edp_panel_on(dev); + ironlake_edp_pll_on(encoder); + } + if (dp_reg & DP_PORT_EN) + intel_dp_link_down(intel_dp); +} + +static void 
intel_dp_commit(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(intel_dp->output_reg); + + if (!(dp_reg & DP_PORT_EN)) { + intel_dp_link_train(intel_dp); + } + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + ironlake_edp_backlight_on(dev); +} + static void intel_dp_dpms(struct drm_encoder *encoder, int mode) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(dp_priv->output_reg); + uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (dp_reg & DP_PORT_EN) { - intel_dp_link_down(intel_encoder, dp_priv->DP); - if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { - ironlake_edp_backlight_off(dev); - ironlake_edp_panel_off(dev); - } + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + ironlake_edp_backlight_off(dev); + ironlake_edp_panel_off(dev); } + if (dp_reg & DP_PORT_EN) + intel_dp_link_down(intel_dp); + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + ironlake_edp_pll_off(encoder); } else { if (!(dp_reg & DP_PORT_EN)) { - intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); - if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_panel_on(dev); + intel_dp_link_train(intel_dp); + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) ironlake_edp_backlight_on(dev); - } } } - dp_priv->dpms_mode = mode; + intel_dp->dpms_mode = mode; } /* @@ -883,12 +917,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) * link status information */ static bool -intel_dp_get_link_status(struct intel_encoder *intel_encoder, +intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { int ret; - ret = intel_dp_aux_native_read(intel_encoder, + ret = intel_dp_aux_native_read(intel_dp, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE); if (ret != DP_LINK_STATUS_SIZE) @@ -965,7 +999,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_encoder *intel_encoder, +intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count, uint8_t train_set[4]) @@ -1101,27 +1135,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) } static bool -intel_dp_set_link_train(struct intel_encoder *intel_encoder, +intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat, uint8_t train_set[4], bool first) { - struct drm_device *dev = intel_encoder->enc.dev; + struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); int ret; - I915_WRITE(dp_priv->output_reg, dp_reg_value); - POSTING_READ(dp_priv->output_reg); + I915_WRITE(intel_dp->output_reg, dp_reg_value); + POSTING_READ(intel_dp->output_reg); if (first) - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, intel_crtc->pipe); - intel_dp_aux_native_write_1(intel_encoder, + intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); - ret = 
intel_dp_aux_native_write(intel_encoder, + ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, train_set, 4); if (ret != 4) return false; @@ -1130,12 +1164,10 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder, } static void -intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) +intel_dp_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_encoder->enc.dev; + struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint8_t train_set[4]; uint8_t link_status[DP_LINK_STATUS_SIZE]; int i; @@ -1145,13 +1177,15 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, bool first = true; int tries; u32 reg; + uint32_t DP = intel_dp->DP; /* Write the link configuration data */ - intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, - link_configuration, DP_LINK_CONFIGURATION_SIZE); + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; @@ -1162,39 +1196,39 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, for (;;) { /* Use train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { + if (IS_GEN6(dev) && IS_eDP(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; - if (!intel_dp_set_link_train(intel_encoder, reg, + if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_1, train_set, first)) break; first = false; /* Set training pattern 1 */ udelay(100); - if (!intel_dp_get_link_status(intel_encoder, link_status)) + if (!intel_dp_get_link_status(intel_dp, link_status)) break; - if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { clock_recovery = true; break; } /* Check to see if we've tried the max voltage */ - for (i = 0; i < dp_priv->lane_count; i++) + for (i = 0; i < intel_dp->lane_count; i++) if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == dp_priv->lane_count) + if (i == intel_dp->lane_count) break; /* Check to see if we've tried the same voltage 5 times */ @@ -1207,7 +1241,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); + intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); } /* channel equalization */ @@ -1217,30 +1251,30 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, /* Use train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && 
IS_eDP(intel_encoder)) { + if (IS_GEN6(dev) && IS_eDP(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; /* channel eq pattern */ - if (!intel_dp_set_link_train(intel_encoder, reg, + if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_2, train_set, false)) break; udelay(400); - if (!intel_dp_get_link_status(intel_encoder, link_status)) + if (!intel_dp_get_link_status(intel_dp, link_status)) break; - if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { + if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { channel_eq = true; break; } @@ -1250,53 +1284,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, break; /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); + intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); ++tries; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; - I915_WRITE(dp_priv->output_reg, reg); - POSTING_READ(dp_priv->output_reg); - intel_dp_aux_native_write_1(intel_encoder, + I915_WRITE(intel_dp->output_reg, reg); + POSTING_READ(intel_dp->output_reg); + intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void -intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) +intel_dp_link_down(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_encoder->enc.dev; + struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + uint32_t DP = intel_dp->DP; DRM_DEBUG_KMS("\n"); - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_dp)) { DP &= ~DP_PLL_ENABLE; - I915_WRITE(dp_priv->output_reg, DP); - POSTING_READ(dp_priv->output_reg); + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); udelay(100); } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { + if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; - I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); - POSTING_READ(dp_priv->output_reg); + I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); + POSTING_READ(intel_dp->output_reg); } else { DP &= ~DP_LINK_TRAIN_MASK; - I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); - POSTING_READ(dp_priv->output_reg); + I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + POSTING_READ(intel_dp->output_reg); } udelay(17000); - if (IS_eDP(intel_encoder)) + if (IS_eDP(intel_dp)) DP |= DP_LINK_TRAIN_OFF; - I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); - POSTING_READ(dp_priv->output_reg); + I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(intel_dp->output_reg); } /* @@ -1309,41 +1343,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) */ static void -intel_dp_check_link_status(struct intel_encoder *intel_encoder) 
+intel_dp_check_link_status(struct intel_dp *intel_dp) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint8_t link_status[DP_LINK_STATUS_SIZE]; - if (!intel_encoder->enc.crtc) + if (!intel_dp->base.enc.crtc) return; - if (!intel_dp_get_link_status(intel_encoder, link_status)) { - intel_dp_link_down(intel_encoder, dp_priv->DP); + if (!intel_dp_get_link_status(intel_dp, link_status)) { + intel_dp_link_down(intel_dp); return; } - if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) - intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); + if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) + intel_dp_link_train(intel_dp); } static enum drm_connector_status ironlake_dp_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum drm_connector_status status; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_encoder, - 0x000, dp_priv->dpcd, - sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + if (intel_dp_aux_native_read(intel_dp, + 0x000, intel_dp->dpcd, + sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { - if (dp_priv->dpcd[0] != 0) + if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } - DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], - dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); + DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], + intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); return status; } @@ -1357,19 +1389,18 @@ static enum drm_connector_status intel_dp_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_device *dev = intel_encoder->enc.dev; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint32_t temp, bit; enum drm_connector_status status; - dp_priv->has_audio = false; + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) return ironlake_dp_detect(connector); - switch (dp_priv->output_reg) { + switch (intel_dp->output_reg) { case DP_B: bit = DPB_HOTPLUG_INT_STATUS; break; @@ -1389,11 +1420,11 @@ intel_dp_detect(struct drm_connector *connector) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_encoder, - 0x000, dp_priv->dpcd, - sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + if (intel_dp_aux_native_read(intel_dp, + 0x000, intel_dp->dpcd, + sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { - if (dp_priv->dpcd[0] != 0) + if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } return status; @@ -1402,18 +1433,17 @@ intel_dp_detect(struct drm_connector *connector) static int intel_dp_get_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_device *dev = intel_encoder->enc.dev; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 
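Editorial sketch: the recurring change in this file is that the old struct intel_dp_priv, reached through the untyped intel_encoder->dev_priv pointer, becomes a struct intel_dp that embeds struct intel_encoder as its base member, and helpers such as enc_to_intel_dp() recover the wrapper with container_of(). The standalone C sketch below shows only that idiom; the type and helper names (fake_encoder, fake_dp, enc_to_fake_dp, sketch_container_of) are invented for illustration and are not the driver's.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the drm/i915 types touched by this diff. */
struct fake_encoder {
	int type;
};

struct fake_dp {
	struct fake_encoder base;	/* embedded base object */
	unsigned int output_reg;
	unsigned char lane_count;
};

/* Same idea as the kernel's container_of(): step back from a pointer
 * to an embedded member to the structure that contains it. */
#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct fake_dp *enc_to_fake_dp(struct fake_encoder *encoder)
{
	return sketch_container_of(encoder, struct fake_dp, base);
}

int main(void)
{
	struct fake_dp dp = {
		.base = { .type = 1 },
		.output_reg = 0x1000,
		.lane_count = 4,
	};
	struct fake_encoder *encoder = &dp.base;	/* what generic code passes around */
	struct fake_dp *back = enc_to_fake_dp(encoder);

	printf("lane_count=%u output_reg=%#x\n", back->lane_count, back->output_reg);
	return 0;
}

Compared with the old void dev_priv pointer, the relationship between the two structures is now visible to the compiler and no cast is needed at the call sites.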
int ret; /* We should parse the EDID data and find out if it has an audio sink */ - ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); if (ret) { - if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, @@ -1430,7 +1460,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { + if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1452,9 +1482,9 @@ intel_dp_destroy (struct drm_connector *connector) static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, - .prepare = intel_encoder_prepare, + .prepare = intel_dp_prepare, .mode_set = intel_dp_mode_set, - .commit = intel_encoder_commit, + .commit = intel_dp_commit, }; static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -1470,27 +1500,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = .best_encoder = intel_attached_encoder, }; -static void intel_dp_enc_destroy(struct drm_encoder *encoder) -{ - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - static const struct drm_encoder_funcs intel_dp_enc_funcs = { - .destroy = intel_dp_enc_destroy, + .destroy = intel_encoder_destroy, }; void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); - if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) - intel_dp_check_link_status(intel_encoder); + if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON) + intel_dp_check_link_status(intel_dp); } /* Return which DP Port should be selected for Transcoder DP control */ @@ -1500,18 +1520,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_encoder *encoder; - struct intel_encoder *intel_encoder = NULL; list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_dp *intel_dp; + if (encoder->crtc != crtc) continue; - intel_encoder = enc_to_intel_encoder(encoder); - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { - struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - return dp_priv->output_reg; - } + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) + return intel_dp->output_reg; } + return -1; } @@ -1540,30 +1560,28 @@ intel_dp_init(struct drm_device *dev, int output_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; + struct intel_dp *intel_dp; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; - struct intel_dp_priv *dp_priv; const char *name = NULL; int type; - intel_encoder = kcalloc(sizeof(struct intel_encoder) + - sizeof(struct intel_dp_priv), 1, GFP_KERNEL); - if (!intel_encoder) + intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); + if (!intel_dp) return; 
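Related editorial sketch: the allocation change above goes from one kcalloc() sized for the encoder plus a trailing struct intel_dp_priv, located afterwards by pointer arithmetic, to a single kzalloc() of the wrapper, with the caller taking &intel_dp->base. A rough userspace illustration of the two layouts follows; all names (base_obj, old_priv, dp_wrapper, alloc_old_style, alloc_new_style) are hypothetical.

#include <stdlib.h>

struct base_obj {
	int type;
};

/* Old layout: side data carved out of the same allocation, reached by
 * stepping past the base object. */
struct old_priv {
	unsigned int output_reg;
};

static struct base_obj *alloc_old_style(struct old_priv **priv)
{
	struct base_obj *base;

	base = calloc(1, sizeof(*base) + sizeof(**priv));
	if (!base)
		return NULL;
	*priv = (struct old_priv *)(base + 1);	/* the old dev_priv pointer */
	return base;
}

/* New layout: the private fields are ordinary members of a wrapper that
 * embeds the base object, so one sized allocation covers everything. */
struct dp_wrapper {
	struct base_obj base;
	unsigned int output_reg;
};

static struct dp_wrapper *alloc_new_style(void)
{
	return calloc(1, sizeof(struct dp_wrapper));
}

int main(void)
{
	struct old_priv *priv;
	struct base_obj *old = alloc_old_style(&priv);
	struct dp_wrapper *dp = alloc_new_style();

	if (old)
		priv->output_reg = 0x1000;
	if (dp)
		dp->output_reg = 0x1000;

	free(old);
	free(dp);
	return 0;
}

The second form is the one this diff adopts for intel_dp (and, further down, intel_dvo): one object, no hidden layout assumptions, and it is what makes the container_of() helpers shown earlier possible.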
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(intel_dp); return; } + intel_encoder = &intel_dp->base; - dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); - - if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) + if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) if (intel_dpd_is_edp(dev)) - dp_priv->is_pch_edp = true; + intel_dp->is_pch_edp = true; - if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { + if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { @@ -1584,18 +1602,16 @@ intel_dp_init(struct drm_device *dev, int output_reg) else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_encoder)) + if (IS_eDP(intel_dp)) intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); intel_encoder->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = true; connector->doublescan_allowed = 0; - dp_priv->intel_encoder = intel_encoder; - dp_priv->output_reg = output_reg; - dp_priv->has_audio = false; - dp_priv->dpms_mode = DRM_MODE_DPMS_ON; - intel_encoder->dev_priv = dp_priv; + intel_dp->output_reg = output_reg; + intel_dp->has_audio = false; + intel_dp->dpms_mode = DRM_MODE_DPMS_ON; drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); @@ -1630,12 +1646,12 @@ intel_dp_init(struct drm_device *dev, int output_reg) break; } - intel_dp_i2c_init(intel_encoder, intel_connector, name); + intel_dp_i2c_init(intel_dp, intel_connector, name); - intel_encoder->ddc_bus = &dp_priv->adapter; + intel_encoder->ddc_bus = &intel_dp->adapter; intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { + if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b2190148703..ad312ca6b3e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -32,6 +32,20 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" + +#define wait_for(COND, MS, W) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ + int ret__ = 0; \ + while (! 
(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + if (W) msleep(W); \ + } \ + ret__; \ +}) + /* * Display related stuff */ @@ -102,7 +116,6 @@ struct intel_encoder { struct i2c_adapter *ddc_bus; bool load_detect_temp; bool needs_tv_clock; - void *dev_priv; void (*hot_plug)(struct intel_encoder *); int crtc_mask; int clone_mask; @@ -110,7 +123,6 @@ struct intel_encoder { struct intel_connector { struct drm_connector base; - void *dev_priv; }; struct intel_crtc; @@ -156,7 +168,7 @@ struct intel_crtc { uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; - bool cursor_visble; + bool cursor_visible, cursor_on; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) @@ -164,6 +176,16 @@ struct intel_crtc { #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +struct intel_unpin_work { + struct work_struct work; + struct drm_device *dev; + struct drm_gem_object *old_fb_obj; + struct drm_gem_object *pending_flip_obj; + struct drm_pending_vblank_event *event; + int pending; + bool enable_stall_check; +}; + struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, const char *name); void intel_i2c_destroy(struct i2c_adapter *adapter); @@ -188,10 +210,18 @@ extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); +extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, + struct drm_display_mode *adjusted_mode); +extern void intel_pch_panel_fitting(struct drm_device *dev, + int fitting_mode, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + extern int intel_panel_fitter_pipe (struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); +extern void intel_encoder_destroy(struct drm_encoder *encoder); extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); @@ -199,7 +229,8 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void intel_wait_for_vblank(struct drm_device *dev); +extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); +extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 227feca7cf8..a399f4b2c1c 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -38,7 +38,7 @@ #define CH7xxx_ADDR 0x76 #define TFP410_ADDR 0x38 -static struct intel_dvo_device intel_dvo_devices[] = { +static const struct intel_dvo_device intel_dvo_devices[] = { { .type = INTEL_DVO_CHIP_TMDS, .name = "sil164", @@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = { } }; +struct intel_dvo { + struct intel_encoder base; + + struct intel_dvo_device dev; + + struct drm_display_mode *panel_fixed_mode; + bool panel_wants_dither; +}; + +static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) +{ + 
return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); +} + static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - u32 dvo_reg = dvo->dvo_reg; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); if (mode == DRM_MODE_DPMS_ON) { I915_WRITE(dvo_reg, temp | DVO_ENABLE); I915_READ(dvo_reg); - dvo->dev_ops->dpms(dvo, mode); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); } else { - dvo->dev_ops->dpms(dvo, mode); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); I915_READ(dvo_reg); } @@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; /* XXX: Validate clock range */ - if (dvo->panel_fixed_mode) { - if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay) + if (intel_dvo->panel_fixed_mode) { + if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay) return MODE_PANEL; - if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) + if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) return MODE_PANEL; } - return dvo->dev_ops->mode_valid(dvo, mode); + return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ - if (dvo->panel_fixed_mode != NULL) { -#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x + if (intel_dvo->panel_fixed_mode != NULL) { +#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x C(hdisplay); C(hsync_start); C(hsync_end); @@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, #undef C } - if (dvo->dev_ops->mode_fixup) - return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); + if (intel_dvo->dev.dev_ops->mode_fixup) + return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); return true; } @@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); int pipe = intel_crtc->pipe; u32 dvo_val; - u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; + u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; switch (dvo_reg) { @@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, break; } - dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); /* Save the data order, since I don't know what it should be set to. */ dvo_val = I915_READ(dvo_reg) & @@ -214,40 +224,38 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); - return dvo->dev_ops->detect(dvo); + return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } static int intel_dvo_get_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ - intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); if (!list_empty(&connector->probed_modes)) return 1; - - if (dvo->panel_fixed_mode != NULL) { + if (intel_dvo->panel_fixed_mode != NULL) { struct drm_display_mode *mode; - mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); + mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode); if (mode) { drm_mode_probed_add(connector, mode); return 1; } } + return 0; } -static void intel_dvo_destroy (struct drm_connector *connector) +static void intel_dvo_destroy(struct drm_connector *connector) { drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); @@ -277,28 +285,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - - if (dvo) { - if (dvo->dev_ops->destroy) - dvo->dev_ops->destroy(dvo); - if (dvo->panel_fixed_mode) - kfree(dvo->panel_fixed_mode); - } - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + + if (intel_dvo->dev.dev_ops->destroy) + intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); + + kfree(intel_dvo->panel_fixed_mode); + + intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_dvo_enc_funcs = { .destroy = intel_dvo_enc_destroy, }; - /** * Attempts to get a fixed panel timing for LVDS (currently only the i830). * @@ -306,15 +306,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = { * chip being on DVOB/C and having multiple pipes. 
*/ static struct drm_display_mode * -intel_dvo_get_current_mode (struct drm_connector *connector) +intel_dvo_get_current_mode(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_dvo_device *dvo = intel_encoder->dev_priv; - uint32_t dvo_reg = dvo->dvo_reg; - uint32_t dvo_val = I915_READ(dvo_reg); + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); struct drm_display_mode *mode = NULL; /* If the DVO port is active, that'll be the LVDS, so we can pull out @@ -327,7 +325,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector) crtc = intel_get_crtc_from_pipe(dev, pipe); if (crtc) { mode = intel_crtc_mode_get(dev, crtc); - if (mode) { mode->type |= DRM_MODE_TYPE_PREFERRED; if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) @@ -337,28 +334,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector) } } } + return mode; } void intel_dvo_init(struct drm_device *dev) { struct intel_encoder *intel_encoder; + struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; - struct intel_dvo_device *dvo; struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; - intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); - if (!intel_encoder) + + intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); + if (!intel_dvo) return; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(intel_dvo); return; } + intel_encoder = &intel_dvo->base; + /* Set up the DDC bus */ intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); if (!intel_encoder->ddc_bus) @@ -367,10 +368,9 @@ void intel_dvo_init(struct drm_device *dev) /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; + const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; int gpio; - dvo = &intel_dvo_devices[i]; - /* Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined * in the spec. @@ -393,11 +393,8 @@ void intel_dvo_init(struct drm_device *dev) continue; } - if (dvo->dev_ops!= NULL) - ret = dvo->dev_ops->init(dvo, i2cbus); - else - ret = false; - + intel_dvo->dev = *dvo; + ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); if (!ret) continue; @@ -429,9 +426,6 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - intel_encoder->dev_priv = dvo; - intel_encoder->i2c_bus = i2cbus; - drm_encoder_init(dev, &intel_encoder->enc, &intel_dvo_enc_funcs, encoder_type); drm_encoder_helper_add(&intel_encoder->enc, @@ -447,9 +441,9 @@ void intel_dvo_init(struct drm_device *dev) * headers, likely), so for now, just get the current * mode being output through DVO. 
*/ - dvo->panel_fixed_mode = + intel_dvo->panel_fixed_mode = intel_dvo_get_current_mode(connector); - dvo->panel_wants_dither = true; + intel_dvo->panel_wants_dither = true; } drm_sysfs_connector_add(connector); @@ -461,6 +455,6 @@ void intel_dvo_init(struct drm_device *dev) if (i2cbus != NULL) intel_i2c_destroy(i2cbus); free_intel: - kfree(intel_encoder); + kfree(intel_dvo); kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 197887ed182..ccd4c97e652 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -37,11 +37,17 @@ #include "i915_drm.h" #include "i915_drv.h" -struct intel_hdmi_priv { +struct intel_hdmi { + struct intel_encoder base; u32 sdvox_reg; bool has_hdmi_sink; }; +static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) +{ + return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); +} + static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -50,8 +56,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 sdvox; sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; @@ -60,7 +65,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; - if (hdmi_priv->has_hdmi_sink) { + if (intel_hdmi->has_hdmi_sink) { sdvox |= SDVO_AUDIO_ENABLE; if (HAS_PCH_CPT(dev)) sdvox |= HDMI_MODE_SELECT; @@ -73,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, sdvox |= SDVO_PIPE_B_SELECT; } - I915_WRITE(hdmi_priv->sdvox_reg, sdvox); - POSTING_READ(hdmi_priv->sdvox_reg); + I915_WRITE(intel_hdmi->sdvox_reg, sdvox); + POSTING_READ(intel_hdmi->sdvox_reg); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; - temp = I915_READ(hdmi_priv->sdvox_reg); + temp = I915_READ(intel_hdmi->sdvox_reg); /* HW workaround, need to toggle enable bit off and on for 12bpc, but * we do this anyway which shows more stable in testing. */ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); - POSTING_READ(hdmi_priv->sdvox_reg); + I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->sdvox_reg); } if (mode != DRM_MODE_DPMS_ON) { @@ -101,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) temp |= SDVO_ENABLE; } - I915_WRITE(hdmi_priv->sdvox_reg, temp); - POSTING_READ(hdmi_priv->sdvox_reg); + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); /* HW workaround, need to write this twice for issue that may result * in first write getting masked. 
*/ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(hdmi_priv->sdvox_reg, temp); - POSTING_READ(hdmi_priv->sdvox_reg); + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); } } @@ -138,19 +142,17 @@ static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; - hdmi_priv->has_hdmi_sink = false; - edid = drm_get_edid(connector, - intel_encoder->ddc_bus); + intel_hdmi->has_hdmi_sink = false; + edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; - hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); } connector->display_info.raw_edid = NULL; kfree(edid); @@ -162,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector) static int intel_hdmi_get_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. */ - return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -199,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs .best_encoder = intel_attached_encoder, }; -static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) -{ - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { - .destroy = intel_hdmi_enc_destroy, + .destroy = intel_encoder_destroy, }; void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) @@ -219,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) struct drm_connector *connector; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; - struct intel_hdmi_priv *hdmi_priv; + struct intel_hdmi *intel_hdmi; - intel_encoder = kcalloc(sizeof(struct intel_encoder) + - sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); - if (!intel_encoder) + intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); + if (!intel_hdmi) return; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(intel_hdmi); return; } - hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); - + intel_encoder = &intel_hdmi->base; connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); @@ -274,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (!intel_encoder->ddc_bus) goto err_connector; - hdmi_priv->sdvox_reg = sdvox_reg; - intel_encoder->dev_priv = hdmi_priv; + intel_hdmi->sdvox_reg = sdvox_reg; drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, 
DRM_MODE_ENCODER_TMDS); @@ -298,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) err_connector: drm_connector_cleanup(connector); - kfree(intel_encoder); + kfree(intel_hdmi); kfree(intel_connector); return; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0a2e60059fb..b819c108114 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -41,12 +41,18 @@ #include <linux/acpi.h> /* Private structure for the integrated LVDS support */ -struct intel_lvds_priv { +struct intel_lvds { + struct intel_encoder base; int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; }; +static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) +{ + return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); +} + /** * Sets the backlight level. * @@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) static void intel_lvds_set_power(struct drm_device *dev, bool on) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_status, ctl_reg, status_reg, lvds_reg; + u32 ctl_reg, status_reg, lvds_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; @@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); - do { - pp_status = I915_READ(status_reg); - } while ((pp_status & PP_ON) == 0); + if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0)) + DRM_ERROR("timed out waiting to enable LVDS pipe"); intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); } else { @@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - do { - pp_status = I915_READ(status_reg); - } while (pp_status & PP_ON); + if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) + DRM_ERROR("timed out waiting for LVDS pipe to turn off"); I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); POSTING_READ(lvds_reg); @@ -219,9 +223,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); struct drm_encoder *tmp_encoder; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; /* Should never happen!! */ @@ -241,26 +244,20 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* If we don't have a panel mode, there is nothing we can do */ if (dev_priv->panel_fixed_mode == NULL) return true; + /* * We have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; - adjusted_mode->hsync_start = - dev_priv->panel_fixed_mode->hsync_start; - adjusted_mode->hsync_end = - dev_priv->panel_fixed_mode->hsync_end; - adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; - adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; - adjusted_mode->vsync_start = - dev_priv->panel_fixed_mode->vsync_start; - adjusted_mode->vsync_end = - dev_priv->panel_fixed_mode->vsync_end; - adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; - adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); + intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); + + if (HAS_PCH_SPLIT(dev)) { + intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, + mode, adjusted_mode); + return true; + } /* Make sure pre-965s set dither correctly */ if (!IS_I965G(dev)) { @@ -273,10 +270,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, adjusted_mode->vdisplay == mode->vdisplay) goto out; - /* full screen scale for now */ - if (HAS_PCH_SPLIT(dev)) - goto out; - /* 965+ wants fuzzy fitting */ if (IS_I965G(dev)) pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | @@ -288,12 +281,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * to register description and PRM. * Change the value here to see the borders for debugging */ - if (!HAS_PCH_SPLIT(dev)) { - I915_WRITE(BCLRPAT_A, 0); - I915_WRITE(BCLRPAT_B, 0); - } + I915_WRITE(BCLRPAT_A, 0); + I915_WRITE(BCLRPAT_B, 0); - switch (lvds_priv->fitting_mode) { + switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & @@ -378,8 +369,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } out: - lvds_priv->pfit_control = pfit_control; - lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; + intel_lvds->pfit_control = pfit_control; + intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; dev_priv->lvds_border_bits = border; /* @@ -427,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); /* * The LVDS pin pair will already have been turned on in the @@ -444,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. 
*/ - I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); } /** @@ -600,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector, connector->encoder) { struct drm_crtc *crtc = connector->encoder->crtc; struct drm_encoder *encoder = connector->encoder; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; + struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return 0; } - if (lvds_priv->fitting_mode == value) { + if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; } - lvds_priv->fitting_mode = value; + intel_lvds->fitting_mode = value; if (crtc && crtc->enabled) { /* * If the CRTC is enabled, the display will be changed @@ -647,19 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .destroy = intel_lvds_destroy, }; - -static void intel_lvds_enc_destroy(struct drm_encoder *encoder) -{ - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - static const struct drm_encoder_funcs intel_lvds_enc_funcs = { - .destroy = intel_lvds_enc_destroy, + .destroy = intel_encoder_destroy, }; static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) @@ -843,13 +821,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) void intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds *intel_lvds; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; - struct intel_lvds_priv *lvds_priv; u32 lvds; int pipe, gpio = GPIOC; @@ -872,20 +850,20 @@ void intel_lvds_init(struct drm_device *dev) gpio = PCH_GPIOC; } - intel_encoder = kzalloc(sizeof(struct intel_encoder) + - sizeof(struct intel_lvds_priv), GFP_KERNEL); - if (!intel_encoder) { + intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); + if (!intel_lvds) { return; } intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(intel_lvds); return; } - connector = &intel_connector->base; + intel_encoder = &intel_lvds->base; encoder = &intel_encoder->enc; + connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -905,8 +883,6 @@ void intel_lvds_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); - intel_encoder->dev_priv = lvds_priv; /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); /* @@ -916,7 +892,7 @@ void intel_lvds_init(struct drm_device *dev) drm_connector_attach_property(&intel_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); - lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; + intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; /* * LVDS discovery: * 1) check for EDID on DDC @@ -1024,6 +1000,6 @@ 
failed: intel_i2c_destroy(intel_encoder->ddc_bus); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - kfree(intel_encoder); + kfree(intel_lvds); kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d39aea24eab..1d306a458be 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -25,6 +25,8 @@ * * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c */ + +#include <linux/seq_file.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -1367,7 +1369,8 @@ void intel_setup_overlay(struct drm_device *dev) overlay->flip_addr = overlay->reg_bo->gtt_offset; } else { ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS); + I915_GEM_PHYS_OVERLAY_REGS, + 0); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; @@ -1416,3 +1419,99 @@ void intel_cleanup_overlay(struct drm_device *dev) kfree(dev_priv->overlay); } } + +struct intel_overlay_error_state { + struct overlay_registers regs; + unsigned long base; + u32 dovsta; + u32 isr; +}; + +struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay_error_state *error; + struct overlay_registers __iomem *regs; + + if (!overlay || !overlay->active) + return NULL; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + error->dovsta = I915_READ(DOVSTA); + error->isr = I915_READ(ISR); + if (OVERLAY_NONPHYSICAL(overlay->dev)) + error->base = (long) overlay->reg_bo->gtt_offset; + else + error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; + + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) + goto err; + + memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); + intel_overlay_unmap_regs_atomic(overlay); + + return error; + +err: + kfree(error); + return NULL; +} + +void +intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error) +{ + seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", + error->dovsta, error->isr); + seq_printf(m, " Register file at 0x%08lx:\n", + error->base); + +#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x) + P(OBUF_0Y); + P(OBUF_1Y); + P(OBUF_0U); + P(OBUF_0V); + P(OBUF_1U); + P(OBUF_1V); + P(OSTRIDE); + P(YRGB_VPH); + P(UV_VPH); + P(HORZ_PH); + P(INIT_PHS); + P(DWINPOS); + P(DWINSZ); + P(SWIDTH); + P(SWIDTHSW); + P(SHEIGHT); + P(YRGBSCALE); + P(UVSCALE); + P(OCLRC0); + P(OCLRC1); + P(DCLRKV); + P(DCLRKM); + P(SCLRKVH); + P(SCLRKVL); + P(SCLRKEN); + P(OCONFIG); + P(OCMD); + P(OSTART_0Y); + P(OSTART_1Y); + P(OSTART_0U); + P(OSTART_0V); + P(OSTART_1U); + P(OSTART_1V); + P(OTILEOFF_0Y); + P(OTILEOFF_1Y); + P(OTILEOFF_0U); + P(OTILEOFF_0V); + P(OTILEOFF_1U); + P(OTILEOFF_1V); + P(FASTHSCALE); + P(UVSCALEV); +#undef P +} diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c new file mode 100644 index 00000000000..e7f5299d9d5 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -0,0 +1,111 @@ +/* + * Copyright © 2006-2010 Intel Corporation + * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to 
use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Dave Airlie <airlied@linux.ie> + * Jesse Barnes <jesse.barnes@intel.com> + * Chris Wilson <chris@chris-wilson.co.uk> + */ + +#include "intel_drv.h" + +void +intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, + struct drm_display_mode *adjusted_mode) +{ + adjusted_mode->hdisplay = fixed_mode->hdisplay; + adjusted_mode->hsync_start = fixed_mode->hsync_start; + adjusted_mode->hsync_end = fixed_mode->hsync_end; + adjusted_mode->htotal = fixed_mode->htotal; + + adjusted_mode->vdisplay = fixed_mode->vdisplay; + adjusted_mode->vsync_start = fixed_mode->vsync_start; + adjusted_mode->vsync_end = fixed_mode->vsync_end; + adjusted_mode->vtotal = fixed_mode->vtotal; + + adjusted_mode->clock = fixed_mode->clock; + + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); +} + +/* adjusted_mode has been preset to be the panel's fixed mode */ +void +intel_pch_panel_fitting(struct drm_device *dev, + int fitting_mode, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int x, y, width, height; + + x = y = width = height = 0; + + /* Native modes don't need fitting */ + if (adjusted_mode->hdisplay == mode->hdisplay && + adjusted_mode->vdisplay == mode->vdisplay) + goto done; + + switch (fitting_mode) { + case DRM_MODE_SCALE_CENTER: + width = mode->hdisplay; + height = mode->vdisplay; + x = (adjusted_mode->hdisplay - width + 1)/2; + y = (adjusted_mode->vdisplay - height + 1)/2; + break; + + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + if (scaled_width > scaled_height) { /* pillar */ + width = scaled_height / mode->vdisplay; + x = (adjusted_mode->hdisplay - width + 1) / 2; + y = 0; + height = adjusted_mode->vdisplay; + } else if (scaled_width < scaled_height) { /* letter */ + height = scaled_width / mode->hdisplay; + y = (adjusted_mode->vdisplay - height + 1) / 2; + x = 0; + width = adjusted_mode->hdisplay; + } else { + x = y = 0; + width = adjusted_mode->hdisplay; + height = adjusted_mode->vdisplay; + } + } + break; + + default: + case DRM_MODE_SCALE_FULLSCREEN: + x = y = 0; + width = adjusted_mode->hdisplay; + height = adjusted_mode->vdisplay; + break; + } + +done: + dev_priv->pch_pf_pos = (x << 16) | y; + dev_priv->pch_pf_size = (width << 16) | height; +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 26362f8495a..cb3508f78bc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ 
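The DRM_MODE_SCALE_ASPECT branch of intel_pch_panel_fitting() above compares cross-products (panel width times mode height versus mode width times panel height) to decide between pillarbox and letterbox without floating point. A small stand-alone example of that arithmetic, assuming a hypothetical 1920x1200 panel asked to show a 1280x1024 mode (numbers are illustrative, not taken from the patch):

/* Hedged worked example of the aspect-preserving fit. */
#include <stdio.h>

int main(void)
{
	unsigned int panel_w = 1920, panel_h = 1200;	/* fixed panel timing */
	unsigned int mode_w = 1280, mode_h = 1024;	/* requested mode */
	unsigned int x, y, width, height;

	unsigned long scaled_width = (unsigned long)panel_w * mode_h;	/* 1966080 */
	unsigned long scaled_height = (unsigned long)mode_w * panel_h;	/* 1536000 */

	if (scaled_width > scaled_height) {		/* pillarbox: bars left/right */
		width = scaled_height / mode_h;		/* 1536000 / 1024 = 1500 */
		height = panel_h;
		x = (panel_w - width + 1) / 2;		/* centred: 210 */
		y = 0;
	} else {					/* letterbox or exact fit */
		height = scaled_width / mode_w;
		width = panel_w;
		y = (panel_h - height + 1) / 2;
		x = 0;
	}

	/* 1500x1200 at +210+0 keeps the 5:4 ratio of the 1280x1024 source. */
	printf("fit %ux%u at +%u+%u\n", width, height, x, y);
	return 0;
}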
b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -33,18 +33,35 @@ #include "i915_drm.h" #include "i915_trace.h" +static u32 i915_gem_get_seqno(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 seqno; + + seqno = dev_priv->next_seqno; + + /* reserve 0 for non-seqno */ + if (++dev_priv->next_seqno == 0) + dev_priv->next_seqno = 1; + + return seqno; +} + static void render_ring_flush(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + drm_i915_private_t *dev_priv = dev->dev_private; + u32 cmd; + #if WATCH_EXEC DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, invalidate_domains, flush_domains); #endif - u32 cmd; - trace_i915_gem_request_flush(dev, ring->next_seqno, + + trace_i915_gem_request_flush(dev, dev_priv->next_seqno, invalidate_domains, flush_domains); if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { @@ -203,9 +220,13 @@ static int init_render_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; int ret = init_ring_common(dev, ring); + int mode; + if (IS_I9XX(dev) && !IS_GEN3(dev)) { - I915_WRITE(MI_MODE, - (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); + mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; + if (IS_GEN6(dev)) + mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; + I915_WRITE(MI_MODE, mode); } return ret; } @@ -233,9 +254,10 @@ render_ring_add_request(struct drm_device *dev, struct drm_file *file_priv, u32 flush_domains) { - u32 seqno; drm_i915_private_t *dev_priv = dev->dev_private; - seqno = intel_ring_get_seqno(dev, ring); + u32 seqno; + + seqno = i915_gem_get_seqno(dev); if (IS_GEN6(dev)) { BEGIN_LP_RING(6); @@ -405,7 +427,9 @@ bsd_ring_add_request(struct drm_device *dev, u32 flush_domains) { u32 seqno; - seqno = intel_ring_get_seqno(dev, ring); + + seqno = i915_gem_get_seqno(dev); + intel_ring_begin(dev, ring, 4); intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); intel_ring_emit(dev, ring, @@ -479,7 +503,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; - trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); + trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); count = nbox ? 
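The new i915_gem_get_seqno() above moves sequence-number allocation from per-ring state to the device and reserves 0 to mean "no seqno". A stand-alone sketch of that wrap-around rule (illustrative only, not the driver code):

/* Hedged sketch: a counter that wraps but never hands out 0. */
#include <assert.h>
#include <stdint.h>

static uint32_t next_seqno = 1;

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	if (++next_seqno == 0)		/* skip 0 on wrap-around */
		next_seqno = 1;
	return seqno;
}

int main(void)
{
	next_seqno = UINT32_MAX;	/* force a wrap */
	assert(get_seqno() == UINT32_MAX);
	assert(get_seqno() == 1);	/* 0 is never returned */
	return 0;
}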
nbox : 1; @@ -515,7 +539,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_advance(dev, ring); } + if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, MI_FLUSH | + MI_NO_WRITE_FLUSH | + MI_INVALIDATE_ISP ); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); + } /* XXX breadcrumb */ + return 0; } @@ -588,9 +621,10 @@ err: int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { - int ret; struct drm_i915_gem_object *obj_priv; struct drm_gem_object *obj; + int ret; + ring->dev = dev; if (I915_NEED_GFX_HWS(dev)) { @@ -603,16 +637,14 @@ int intel_init_ring_buffer(struct drm_device *dev, if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); ret = -ENOMEM; - goto cleanup; + goto err_hws; } ring->gem_object = obj; ret = i915_gem_object_pin(obj, ring->alignment); - if (ret != 0) { - drm_gem_object_unreference(obj); - goto cleanup; - } + if (ret) + goto err_unref; obj_priv = to_intel_bo(obj); ring->map.size = ring->size; @@ -624,18 +656,14 @@ int intel_init_ring_buffer(struct drm_device *dev, drm_core_ioremap_wc(&ring->map, dev); if (ring->map.handle == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); ret = -EINVAL; - goto cleanup; + goto err_unpin; } ring->virtual_start = ring->map.handle; ret = ring->init(dev, ring); - if (ret != 0) { - intel_cleanup_ring_buffer(dev, ring); - return ret; - } + if (ret) + goto err_unmap; if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); @@ -649,7 +677,15 @@ int intel_init_ring_buffer(struct drm_device *dev, INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); return ret; -cleanup: + +err_unmap: + drm_core_ioremapfree(&ring->map, dev); +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(obj); + ring->gem_object = NULL; +err_hws: cleanup_status_page(dev, ring); return ret; } @@ -682,9 +718,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev, } virt = (unsigned int *)(ring->virtual_start + ring->tail); - rem /= 4; - while (rem--) + rem /= 8; + while (rem--) { + *virt++ = MI_NOOP; *virt++ = MI_NOOP; + } ring->tail = 0; ring->space = ring->head - 8; @@ -729,21 +767,14 @@ void intel_ring_begin(struct drm_device *dev, intel_wrap_ring_buffer(dev, ring); if (unlikely(ring->space < n)) intel_wait_ring_buffer(dev, ring, n); -} -void intel_ring_emit(struct drm_device *dev, - struct intel_ring_buffer *ring, unsigned int data) -{ - unsigned int *virt = ring->virtual_start + ring->tail; - *virt = data; - ring->tail += 4; - ring->tail &= ring->size - 1; - ring->space -= 4; + ring->space -= n; } void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring) { + ring->tail &= ring->size - 1; ring->advance_ring(dev, ring); } @@ -762,18 +793,6 @@ void intel_fill_struct(struct drm_device *dev, intel_ring_advance(dev, ring); } -u32 intel_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - u32 seqno; - seqno = ring->next_seqno; - - /* reserve 0 for non-seqno */ - if (++ring->next_seqno == 0) - ring->next_seqno = 1; - return seqno; -} - struct intel_ring_buffer render_ring = { .name = "render ring", .regs = { @@ -791,7 +810,6 @@ struct intel_ring_buffer render_ring = { .head = 0, .tail = 0, .space = 0, - .next_seqno = 1, .user_irq_refcount = 0, .irq_gem_seqno = 0, .waiting_gem_seqno = 0, @@ -830,7 +848,6 @@ struct intel_ring_buffer bsd_ring = { .head = 0, .tail 
= 0, .space = 0, - .next_seqno = 1, .user_irq_refcount = 0, .irq_gem_seqno = 0, .waiting_gem_seqno = 0, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index d5568d3766d..525e7d3edda 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -26,7 +26,6 @@ struct intel_ring_buffer { unsigned int head; unsigned int tail; unsigned int space; - u32 next_seqno; struct intel_hw_status_page status_page; u32 irq_gem_seqno; /* last seq seem at irq time */ @@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring); void intel_ring_begin(struct drm_device *dev, struct intel_ring_buffer *ring, int n); -void intel_ring_emit(struct drm_device *dev, - struct intel_ring_buffer *ring, u32 data); + +static inline void intel_ring_emit(struct drm_device *dev, + struct intel_ring_buffer *ring, + unsigned int data) +{ + unsigned int *virt = ring->virtual_start + ring->tail; + *virt = data; + ring->tail += 4; +} + void intel_fill_struct(struct drm_device *dev, struct intel_ring_buffer *ring, void *data, diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d9d4d51aa89..e3b7a7ee39c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -31,8 +31,8 @@ #include "drmP.h" #include "drm.h" #include "drm_crtc.h" -#include "intel_drv.h" #include "drm_edid.h" +#include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" @@ -47,9 +47,10 @@ #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) +#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) -static char *tv_format_names[] = { +static const char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , @@ -61,7 +62,9 @@ static char *tv_format_names[] = { #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) -struct intel_sdvo_priv { +struct intel_sdvo { + struct intel_encoder base; + u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ @@ -95,7 +98,7 @@ struct intel_sdvo_priv { bool is_tv; /* This is for current tv format name */ - char *tv_format_name; + int tv_format_index; /** * This is set if we treat the device as HDMI, instead of DVI. @@ -132,37 +135,40 @@ struct intel_sdvo_priv { }; struct intel_sdvo_connector { + struct intel_connector base; + /* Mark the type of connector */ uint16_t output_flag; /* This contains all current supported TV format */ - char *tv_format_supported[TV_FORMAT_NUM]; + u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; - struct drm_property *tv_format_property; - struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; - - /** - * Returned SDTV resolutions allowed for the current format, if the - * device reported it. 
- */ - struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; + struct drm_property *tv_format; /* add the property for the SDVO-TV */ - struct drm_property *left_property; - struct drm_property *right_property; - struct drm_property *top_property; - struct drm_property *bottom_property; - struct drm_property *hpos_property; - struct drm_property *vpos_property; + struct drm_property *left; + struct drm_property *right; + struct drm_property *top; + struct drm_property *bottom; + struct drm_property *hpos; + struct drm_property *vpos; + struct drm_property *contrast; + struct drm_property *saturation; + struct drm_property *hue; + struct drm_property *sharpness; + struct drm_property *flicker_filter; + struct drm_property *flicker_filter_adaptive; + struct drm_property *flicker_filter_2d; + struct drm_property *tv_chroma_filter; + struct drm_property *tv_luma_filter; + struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ - struct drm_property *brightness_property; - struct drm_property *contrast_property; - struct drm_property *saturation_property; - struct drm_property *hue_property; + struct drm_property *brightness; /* Add variable to record current setting for the above property */ u32 left_margin, right_margin, top_margin, bottom_margin; + /* this is to get the range of margin.*/ u32 max_hscan, max_vscan; u32 max_hpos, cur_hpos; @@ -171,36 +177,54 @@ struct intel_sdvo_connector { u32 cur_contrast, max_contrast; u32 cur_saturation, max_saturation; u32 cur_hue, max_hue; + u32 cur_sharpness, max_sharpness; + u32 cur_flicker_filter, max_flicker_filter; + u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; + u32 cur_flicker_filter_2d, max_flicker_filter_2d; + u32 cur_tv_chroma_filter, max_tv_chroma_filter; + u32 cur_tv_luma_filter, max_tv_luma_filter; + u32 cur_dot_crawl, max_dot_crawl; }; +static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) +{ + return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); +} + +static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) +{ + return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); +} + static bool -intel_sdvo_output_setup(struct intel_encoder *intel_encoder, - uint16_t flags); -static void -intel_sdvo_tv_create_property(struct drm_connector *connector, int type); -static void -intel_sdvo_create_enhance_property(struct drm_connector *connector); +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); +static bool +intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + int type); +static bool +intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). 
*/ -static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) +static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { - struct drm_device *dev = intel_encoder->enc.dev; + struct drm_device *dev = intel_sdvo->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 bval = val, cval = val; int i; - if (sdvo_priv->sdvo_reg == PCH_SDVOB) { - I915_WRITE(sdvo_priv->sdvo_reg, val); - I915_READ(sdvo_priv->sdvo_reg); + if (intel_sdvo->sdvo_reg == PCH_SDVOB) { + I915_WRITE(intel_sdvo->sdvo_reg, val); + I915_READ(intel_sdvo->sdvo_reg); return; } - if (sdvo_priv->sdvo_reg == SDVOB) { + if (intel_sdvo->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); @@ -219,33 +243,27 @@ static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) } } -static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, - u8 *ch) +static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - u8 out_buf[2]; + u8 out_buf[2] = { addr, 0 }; u8 buf[2]; - int ret; - struct i2c_msg msgs[] = { { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = I2C_M_RD, .len = 1, .buf = buf, } }; + int ret; - out_buf[0] = addr; - out_buf[1] = 0; - - if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -255,35 +273,26 @@ static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, return false; } -static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, - u8 ch) +static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - u8 out_buf[2]; + u8 out_buf[2] = { addr, ch }; struct i2c_msg msgs[] = { { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = 0, .len = 2, .buf = out_buf, } }; - out_buf[0] = addr; - out_buf[1] = ch; - - if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) - { - return true; - } - return false; + return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; - char *name; + const char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), @@ -328,13 +337,14 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), + /* Add the op code for SDVO enhancements */ - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), + 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), @@ -353,6 +363,27 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), + /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), @@ -377,17 +408,15 @@ static const struct _sdvo_cmd_name { }; #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) -#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC") -#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) +#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? 
"SDVOB" : "SDVOC") -static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, - void *args, int args_len) +static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; DRM_DEBUG_KMS("%s: W: %02X ", - SDVO_NAME(sdvo_priv), cmd); + SDVO_NAME(intel_sdvo), cmd); for (i = 0; i < args_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) @@ -403,19 +432,20 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, DRM_LOG_KMS("\n"); } -static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, - void *args, int args_len) +static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) { int i; - intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); + intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { - intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, - ((u8*)args)[i]); + if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i, + ((u8*)args)[i])) + return false; } - intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); + return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd); } static const char *cmd_status_names[] = { @@ -428,14 +458,13 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; -static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, +static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, void *response, int response_len, u8 status) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; - DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); for (i = 0; i < response_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); for (; i < 8; i++) @@ -447,8 +476,8 @@ static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, DRM_LOG_KMS("\n"); } -static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, - void *response, int response_len) +static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, + void *response, int response_len) { int i; u8 status; @@ -457,24 +486,26 @@ static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, while (retry--) { /* Read the command response */ for (i = 0; i < response_len; i++) { - intel_sdvo_read_byte(intel_encoder, - SDVO_I2C_RETURN_0 + i, - &((u8 *)response)[i]); + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_RETURN_0 + i, + &((u8 *)response)[i])) + return false; } /* read the return status */ - intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, - &status); + if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, + &status)) + return false; - intel_sdvo_debug_response(intel_encoder, response, response_len, + intel_sdvo_debug_response(intel_sdvo, response, response_len, status); if (status != SDVO_CMD_STATUS_PENDING) - return status; + break; mdelay(50); } - return status; + return status == SDVO_CMD_STATUS_SUCCESS; } static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) @@ -494,37 +525,36 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) * another I2C transaction after issuing the DDC bus switch, it will be * switched to the internal SDVO register. 
*/ -static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, +static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, u8 target) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2], cmd_buf[2], ret_value[2], ret; struct i2c_msg msgs[] = { { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = 0, .len = 2, .buf = out_buf, }, /* the following two are to read the response */ { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = 0, .len = 1, .buf = cmd_buf, }, { - .addr = sdvo_priv->slave_addr >> 1, + .addr = intel_sdvo->slave_addr >> 1, .flags = I2C_M_RD, .len = 1, .buf = ret_value, }, }; - intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, + intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); /* write the DDC switch command argument */ - intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); + intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target); out_buf[0] = SDVO_I2C_OPCODE; out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; @@ -533,7 +563,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode ret_value[0] = 0; ret_value[1] = 0; - ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); + ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); if (ret != 3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); @@ -547,23 +577,29 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode return; } -static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) +static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { - struct intel_sdvo_set_target_input_args targets = {0}; - u8 status; - - if (target_0 && target_1) - return SDVO_CMD_STATUS_NOTSUPP; + if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) + return false; - if (target_1) - targets.target_1 = 1; + return intel_sdvo_read_response(intel_sdvo, NULL, 0); +} - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, - sizeof(targets)); +static bool +intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len) +{ + if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0)) + return false; - status = intel_sdvo_read_response(intel_encoder, NULL, 0); + return intel_sdvo_read_response(intel_sdvo, value, len); +} - return (status == SDVO_CMD_STATUS_SUCCESS); +static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo) +{ + struct intel_sdvo_set_target_input_args targets = {0}; + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TARGET_INPUT, + &targets, sizeof(targets)); } /** @@ -572,14 +608,12 @@ static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, boo * This function is making an assumption about the layout of the response, * which should be checked against the docs. 
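intel_sdvo_set_value()/intel_sdvo_get_value() above collapse the old write-command-then-check-status pairs into boolean helpers, so the callers converted throughout the rest of this file can chain steps with && and propagate the first failure. A generic stand-alone sketch of that shape, with hypothetical stand-ins for the I2C transfer routines:

/* Hedged sketch of the bool-returning command helpers, not the driver code. */
#include <stdbool.h>
#include <stdio.h>

static bool send_cmd(unsigned char cmd, const void *args, int len)
{
	/* stand-in for intel_sdvo_write_cmd(); pretend the transfer succeeds */
	(void)cmd; (void)args; (void)len;
	return true;
}

static bool read_reply(void *out, int len)
{
	/* stand-in for intel_sdvo_read_response() */
	(void)out; (void)len;
	return true;
}

static bool set_value(unsigned char cmd, const void *data, int len)
{
	return send_cmd(cmd, data, len) && read_reply(NULL, 0);
}

static bool get_value(unsigned char cmd, void *out, int len)
{
	return send_cmd(cmd, NULL, 0) && read_reply(out, len);
}

int main(void)
{
	unsigned char outputs[2] = { 0 };

	/* Each step short-circuits on failure instead of comparing a status byte. */
	if (!set_value(0x05 /* hypothetical opcode */, outputs, sizeof(outputs)) ||
	    !get_value(0x06 /* hypothetical opcode */, outputs, sizeof(outputs)))
		fprintf(stderr, "SDVO command failed\n");
	return 0;
}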
*/ -static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) +static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2) { struct intel_sdvo_get_trained_inputs_response response; - u8 status; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); - if (status != SDVO_CMD_STATUS_SUCCESS) + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, + &response, sizeof(response))) return false; *input_1 = response.input0_trained; @@ -587,21 +621,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b return true; } -static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, u16 outputs) { - u8 status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, - sizeof(outputs)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - return (status == SDVO_CMD_STATUS_SUCCESS); + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_ACTIVE_OUTPUTS, + &outputs, sizeof(outputs)); } -static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, int mode) { - u8 status, state = SDVO_ENCODER_STATE_ON; + u8 state = SDVO_ENCODER_STATE_ON; switch (mode) { case DRM_MODE_DPMS_ON: @@ -618,88 +649,63 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encod break; } - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, - sizeof(state)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - - return (status == SDVO_CMD_STATUS_SUCCESS); + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); } -static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, +static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo, int *clock_min, int *clock_max) { struct intel_sdvo_pixel_clock_range clocks; - u8 status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, - NULL, 0); - - status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); - if (status != SDVO_CMD_STATUS_SUCCESS) + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, + &clocks, sizeof(clocks))) return false; /* Convert the values from units of 10 kHz to kHz. 
*/ *clock_min = clocks.min * 10; *clock_max = clocks.max * 10; - return true; } -static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, u16 outputs) { - u8 status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, - sizeof(outputs)); - - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - return (status == SDVO_CMD_STATUS_SUCCESS); + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TARGET_OUTPUT, + &outputs, sizeof(outputs)); } -static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, +static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, struct intel_sdvo_dtd *dtd) { - u8 status; - - intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return true; + return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && + intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); } -static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_set_timing(intel_encoder, + return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } -static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_set_timing(intel_encoder, + return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static bool -intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, +intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, uint16_t clock, uint16_t width, uint16_t height) { struct intel_sdvo_preferred_input_timing_args args; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - uint8_t status; memset(&args, 0, sizeof(args)); args.clock = clock; @@ -707,59 +713,32 @@ intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, args.height = height; args.interlace = 0; - if (sdvo_priv->is_lvds && - (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || - sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) + if (intel_sdvo->is_lvds && + (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || + intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) args.scaled = 1; - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, - &args, sizeof(args)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return true; + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, + &args, sizeof(args)); } -static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, +static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { - bool status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, - NULL, 0); - - status = intel_sdvo_read_response(intel_encoder, &dtd->part1, - 
sizeof(dtd->part1)); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, - NULL, 0); - - status = intel_sdvo_read_response(intel_encoder, &dtd->part2, - sizeof(dtd->part2)); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return false; + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + &dtd->part1, sizeof(dtd->part1)) && + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + &dtd->part2, sizeof(dtd->part2)); } -static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) +static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val) { - u8 status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return true; + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); } static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; @@ -808,7 +787,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, } static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, - struct intel_sdvo_dtd *dtd) + const struct intel_sdvo_dtd *dtd) { mode->hdisplay = dtd->part1.h_active; mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; @@ -840,45 +819,33 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, +static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, struct intel_sdvo_encode *encode) { - uint8_t status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); - if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ - memset(encode, 0, sizeof(*encode)); - return false; - } + if (intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPP_ENCODE, + encode, sizeof(*encode))) + return true; - return true; + /* non-support means DVI */ + memset(encode, 0, sizeof(*encode)); + return false; } -static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, uint8_t mode) { - uint8_t status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - - return (status == SDVO_CMD_STATUS_SUCCESS); + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } -static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, uint8_t mode) { - uint8_t status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - - return (status == SDVO_CMD_STATUS_SUCCESS); + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } #if 0 -static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) +static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; uint8_t set_buf_index[2]; @@ -887,8 +854,7 @@ static void 
intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) uint8_t buf[48]; uint8_t *pos; - intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); - intel_sdvo_read_response(encoder, &av_split, 1); + intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; @@ -908,7 +874,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) } #endif -static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, int index, uint8_t *data, int8_t size, uint8_t tx_rate) { @@ -917,15 +883,18 @@ static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, set_buf_index[0] = index; set_buf_index[1] = 0; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2); + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return false; for (; size > 0; size -= 8) { - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) + return false; + data += 8; } - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); + return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) @@ -1000,7 +969,7 @@ struct dip_infoframe { } __attribute__ ((packed)) u; } __attribute__((packed)); -static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, struct drm_display_mode * mode) { struct dip_infoframe avi_if = { @@ -1011,133 +980,107 @@ static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 4 + avi_if.len); - intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, - 4 + avi_if.len, - SDVO_HBUF_TX_VSYNC); + return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, + 4 + avi_if.len, + SDVO_HBUF_TX_VSYNC); } -static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) +static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) { - struct intel_sdvo_tv_format format; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - uint32_t format_map, i; - uint8_t status; - - for (i = 0; i < TV_FORMAT_NUM; i++) - if (tv_format_names[i] == sdvo_priv->tv_format_name) - break; + uint32_t format_map; - format_map = 1 << i; + format_map = 1 << intel_sdvo->tv_format_index; memset(&format, 0, sizeof(format)); - memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 
- sizeof(format) : sizeof(format_map)); + memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, - sizeof(format)); - - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) - DRM_DEBUG_KMS("%s: Failed to set TV format\n", - SDVO_NAME(sdvo_priv)); + BUILD_BUG_ON(sizeof(format) != 6); + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TV_FORMAT, + &format, sizeof(format)); } -static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool +intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, + struct drm_display_mode *mode) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; + struct intel_sdvo_dtd output_dtd; - if (dev_priv->is_tv) { - struct intel_sdvo_dtd output_dtd; - bool success; + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return false; - /* We need to construct preferred input timings based on our - * output timings. To do that, we have to set the output - * timings, even though this isn't really the right place in - * the sequence to do it. Oh well. - */ + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) + return false; + return true; +} - /* Set output timings */ - intel_sdvo_get_dtd_from_mode(&output_dtd, mode); - intel_sdvo_set_target_output(intel_encoder, - dev_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &output_dtd); +static bool +intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_sdvo_dtd input_dtd; - /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(intel_encoder, true, false); + /* Reset the input timing to the screen. Assume always input 0. */ + if (!intel_sdvo_set_target_input(intel_sdvo)) + return false; + if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay)) + return false; - success = intel_sdvo_create_preferred_input_timing(intel_encoder, - mode->clock / 10, - mode->hdisplay, - mode->vdisplay); - if (success) { - struct intel_sdvo_dtd input_dtd; + if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, + &input_dtd)) + return false; - intel_sdvo_get_preferred_input_timing(intel_encoder, - &input_dtd); - intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); - dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags; - drm_mode_set_crtcinfo(adjusted_mode, 0); + drm_mode_set_crtcinfo(adjusted_mode, 0); + mode->clock = adjusted_mode->clock; + return true; +} - mode->clock = adjusted_mode->clock; +static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - adjusted_mode->clock *= - intel_sdvo_get_pixel_multiplier(mode); - } else { + /* We need to construct preferred input timings based on our + * output timings. To do that, we have to set the output + * timings, even though this isn't really the right place in + * the sequence to do it. Oh well. 
+ */ + if (intel_sdvo->is_tv) { + if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return false; - } - } else if (dev_priv->is_lvds) { - struct intel_sdvo_dtd output_dtd; - bool success; - - drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0); - /* Set output timings */ - intel_sdvo_get_dtd_from_mode(&output_dtd, - dev_priv->sdvo_lvds_fixed_mode); - - intel_sdvo_set_target_output(intel_encoder, - dev_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &output_dtd); - - /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(intel_encoder, true, false); - - - success = intel_sdvo_create_preferred_input_timing( - intel_encoder, - mode->clock / 10, - mode->hdisplay, - mode->vdisplay); - - if (success) { - struct intel_sdvo_dtd input_dtd; - - intel_sdvo_get_preferred_input_timing(intel_encoder, - &input_dtd); - intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); - dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; - drm_mode_set_crtcinfo(adjusted_mode, 0); + (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, + mode, + adjusted_mode); + } else if (intel_sdvo->is_lvds) { + drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); - mode->clock = adjusted_mode->clock; - - adjusted_mode->clock *= - intel_sdvo_get_pixel_multiplier(mode); - } else { + if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, + intel_sdvo->sdvo_lvds_fixed_mode)) return false; - } - } else { - /* Make the CRTC code factor in the SDVO pixel multiplier. The - * SDVO device will be told of the multiplier during mode_set. - */ - adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); + (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, + mode, + adjusted_mode); } + + /* Make the CRTC code factor in the SDVO pixel multiplier. The + * SDVO device will be told of the multiplier during mode_set. + */ + adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); + return true; } @@ -1149,13 +1092,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); u32 sdvox = 0; - int sdvo_pixel_multiply; + int sdvo_pixel_multiply, rate; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd; - u8 status; if (!mode) return; @@ -1166,41 +1107,46 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. 
*/ - in_out.in0 = sdvo_priv->attached_output; + in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, + intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); - status = intel_sdvo_read_response(intel_encoder, NULL, 0); - if (sdvo_priv->is_hdmi) { - intel_sdvo_set_avi_infoframe(intel_encoder, mode); + if (intel_sdvo->is_hdmi) { + if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) + return; + sdvox |= SDVO_AUDIO_ENABLE; } /* We have tried to get input timing in mode_fixup, and filled into adjusted_mode */ - if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; - } else - intel_sdvo_get_dtd_from_mode(&input_dtd, mode); + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) + input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; /* If it's a TV, we already set the output timing in mode_fixup. * Otherwise, the output timing is equal to the input timing. */ - if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { + if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { /* Set the output timing to the screen */ - intel_sdvo_set_target_output(intel_encoder, - sdvo_priv->attached_output); - intel_sdvo_set_output_timing(intel_encoder, &input_dtd); + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return; + + (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); } /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(intel_encoder, true, false); + if (!intel_sdvo_set_target_input(intel_sdvo)) + return; - if (sdvo_priv->is_tv) - intel_sdvo_set_tv_format(intel_encoder); + if (intel_sdvo->is_tv) { + if (!intel_sdvo_set_tv_format(intel_sdvo)) + return; + } /* We would like to use intel_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that @@ -1217,23 +1163,17 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo_set_input_timing(encoder, &input_dtd); } #else - intel_sdvo_set_input_timing(intel_encoder, &input_dtd); + (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); #endif - switch (intel_sdvo_get_pixel_multiplier(mode)) { - case 1: - intel_sdvo_set_clock_rate_mult(intel_encoder, - SDVO_CLOCK_RATE_MULT_1X); - break; - case 2: - intel_sdvo_set_clock_rate_mult(intel_encoder, - SDVO_CLOCK_RATE_MULT_2X); - break; - case 4: - intel_sdvo_set_clock_rate_mult(intel_encoder, - SDVO_CLOCK_RATE_MULT_4X); - break; + sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); + switch (sdvo_pixel_multiply) { + case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; + case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; + case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; } + if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) + return; /* Set the SDVO control regs. 
*/ if (IS_I965G(dev)) { @@ -1243,8 +1183,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { - sdvox |= I915_READ(sdvo_priv->sdvo_reg); - switch (sdvo_priv->sdvo_reg) { + sdvox |= I915_READ(intel_sdvo->sdvo_reg); + switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; @@ -1257,7 +1197,6 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; - sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); if (IS_I965G(dev)) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { @@ -1266,28 +1205,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; } - if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) + if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) sdvox |= SDVO_STALL_SELECT; - intel_sdvo_write_sdvox(intel_encoder, sdvox); + intel_sdvo_write_sdvox(intel_sdvo, sdvox); } static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; if (mode != DRM_MODE_DPMS_ON) { - intel_sdvo_set_active_outputs(intel_encoder, 0); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) - intel_sdvo_set_encoder_power_state(intel_encoder, mode); + intel_sdvo_set_encoder_power_state(intel_sdvo, mode); if (mode == DRM_MODE_DPMS_OFF) { - temp = I915_READ(sdvo_priv->sdvo_reg); + temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { - intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } } else { @@ -1295,28 +1234,25 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) int i; u8 status; - temp = I915_READ(sdvo_priv->sdvo_reg); + temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) - intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev); - - status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, - &input2); - + intel_wait_for_vblank(dev, intel_crtc->pipe); + status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG_KMS("First %s output reported failure to " - "sync\n", SDVO_NAME(sdvo_priv)); + "sync\n", SDVO_NAME(intel_sdvo)); } if (0) - intel_sdvo_set_encoder_power_state(intel_encoder, mode); - intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); + intel_sdvo_set_encoder_power_state(intel_sdvo, mode); + intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } return; } @@ -1325,42 +1261,31 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (sdvo_priv->pixel_clock_min > mode->clock) + if (intel_sdvo->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; - if (sdvo_priv->pixel_clock_max < mode->clock) + if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; - if (sdvo_priv->is_lvds == true) { - if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) + if (intel_sdvo->is_lvds) { + if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; - if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) - return MODE_PANEL; - - if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay) + if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } -static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) +static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { - u8 status; - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - - return true; + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); } /* No use! 
*/ @@ -1368,12 +1293,12 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; - struct intel_encoder *iout = NULL; - struct intel_sdvo_priv *sdvo; + struct intel_sdvo *iout = NULL; + struct intel_sdvo *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_intel_encoder(connector); + iout = to_intel_sdvo(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; @@ -1395,75 +1320,69 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; - struct intel_encoder *intel_encoder; + struct intel_sdvo *intel_sdvo; DRM_DEBUG_KMS("\n"); if (!connector) return 0; - intel_encoder = to_intel_encoder(connector); - - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, &response, 2); - - if (response[0] !=0) - return 1; + intel_sdvo = to_intel_sdvo(connector); - return 0; + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, + &response, 2) && response[0]; } void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; - struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_encoder, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_sdvo, &response, 2); if (on) { - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); + status = intel_sdvo_read_response(intel_sdvo, &response, 2); - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_encoder, &response, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_sdvo, &response, 2); } #endif static bool -intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) +intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int caps = 0; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & 
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) caps++; - if (sdvo_priv->caps.output_flags & + if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) caps++; @@ -1475,11 +1394,11 @@ intel_find_analog_connector(struct drm_device *dev) { struct drm_connector *connector; struct drm_encoder *encoder; - struct intel_encoder *intel_encoder; + struct intel_sdvo *intel_sdvo; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_encoder = enc_to_intel_encoder(encoder); - if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { + intel_sdvo = enc_to_intel_sdvo(encoder); + if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (encoder == intel_attached_encoder(connector)) return connector; @@ -1493,8 +1412,8 @@ static int intel_analog_is_connected(struct drm_device *dev) { struct drm_connector *analog_connector; - analog_connector = intel_find_analog_connector(dev); + analog_connector = intel_find_analog_connector(dev); if (!analog_connector) return false; @@ -1509,54 +1428,52 @@ enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; - edid = drm_get_edid(connector, intel_encoder->ddc_bus); + edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); /* This is only applied to SDVO cards with multiple outputs */ - if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { uint8_t saved_ddc, temp_ddc; - saved_ddc = sdvo_priv->ddc_bus; - temp_ddc = sdvo_priv->ddc_bus >> 1; + saved_ddc = intel_sdvo->ddc_bus; + temp_ddc = intel_sdvo->ddc_bus >> 1; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ while(temp_ddc > 1) { - sdvo_priv->ddc_bus = temp_ddc; - edid = drm_get_edid(connector, intel_encoder->ddc_bus); + intel_sdvo->ddc_bus = temp_ddc; + edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); if (edid) { /* * When we can get the EDID, maybe it is the * correct DDC bus. Update it. 
*/ - sdvo_priv->ddc_bus = temp_ddc; + intel_sdvo->ddc_bus = temp_ddc; break; } temp_ddc >>= 1; } if (edid == NULL) - sdvo_priv->ddc_bus = saved_ddc; + intel_sdvo->ddc_bus = saved_ddc; } /* when there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector */ - if (edid == NULL && sdvo_priv->analog_ddc_bus && + if (edid == NULL && intel_sdvo->analog_ddc_bus && !intel_analog_is_connected(connector->dev)) - edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); + edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); if (edid != NULL) { bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); - bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); + bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK); /* DDC bus is shared, match EDID to connector type */ if (is_digital && need_digital) - sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); else if (is_digital != need_digital) status = connector_status_disconnected; @@ -1572,33 +1489,29 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { uint16_t response; - u8 status; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); - if (sdvo_priv->is_tv) { + if (!intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) + return connector_status_unknown; + if (intel_sdvo->is_tv) { /* add 30ms delay when the output type is SDVO-TV */ mdelay(30); } - status = intel_sdvo_read_response(intel_encoder, &response, 2); + if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) + return connector_status_unknown; DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); - if (status != SDVO_CMD_STATUS_SUCCESS) - return connector_status_unknown; - if (response == 0) return connector_status_disconnected; - sdvo_priv->attached_output = response; + intel_sdvo->attached_output = response; - if ((sdvo_connector->output_flag & response) == 0) + if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (response & SDVO_TMDS_MASK) ret = intel_sdvo_hdmi_sink_detect(connector); @@ -1607,16 +1520,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect /* May update encoder flag for like clock for SDVO TV, etc.*/ if (ret == connector_status_connected) { - sdvo_priv->is_tv = false; - sdvo_priv->is_lvds = false; - intel_encoder->needs_tv_clock = false; + intel_sdvo->is_tv = false; + intel_sdvo->is_lvds = false; + intel_sdvo->base.needs_tv_clock = false; if (response & SDVO_TV_MASK) { - sdvo_priv->is_tv = true; - intel_encoder->needs_tv_clock = true; + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; } if (response & SDVO_LVDS_MASK) - sdvo_priv->is_lvds = true; + intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; } return ret; @@ 
-1625,12 +1538,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); int num_modes; /* set the bus switch and get the modes */ - num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC @@ -1639,11 +1551,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) * which case we'll look there for the digital DDC data. */ if (num_modes == 0 && - sdvo_priv->analog_ddc_bus && + intel_sdvo->analog_ddc_bus && !intel_analog_is_connected(connector->dev)) { /* Switch to the analog ddc bus and try that */ - (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); + (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); } } @@ -1715,52 +1627,43 @@ struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; - uint8_t status; - /* Read the list of supported input resolutions for the selected TV * format. */ - for (i = 0; i < TV_FORMAT_NUM; i++) - if (tv_format_names[i] == sdvo_priv->tv_format_name) - break; - - format_map = (1 << i); + format_map = 1 << intel_sdvo->tv_format_index; memcpy(&tv_res, &format_map, - sizeof(struct intel_sdvo_sdtv_resolution_request) > - sizeof(format_map) ? 
sizeof(format_map) : - sizeof(struct intel_sdvo_sdtv_resolution_request)); + min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); - intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); + if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) + return; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, - &tv_res, sizeof(tv_res)); - status = intel_sdvo_read_response(intel_encoder, &reply, 3); - if (status != SDVO_CMD_STATUS_SUCCESS) + BUILD_BUG_ON(sizeof(tv_res) != 3); + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + &tv_res, sizeof(tv_res))) + return; + if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) return; for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, - &sdvo_tv_modes[i]); + &sdvo_tv_modes[i]); if (nmode) drm_mode_probed_add(connector, nmode); } - } static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct drm_display_mode *newmode; /* @@ -1768,7 +1671,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. */ - intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); if (list_empty(&connector->probed_modes) == false) goto end; @@ -1787,8 +1690,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { - sdvo_priv->sdvo_lvds_fixed_mode = + intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); + intel_sdvo->is_lvds = true; break; } } @@ -1797,66 +1701,67 @@ end: static int intel_sdvo_get_modes(struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - if (IS_TV(sdvo_connector)) + if (IS_TV(intel_sdvo_connector)) intel_sdvo_get_tv_modes(connector); - else if (IS_LVDS(sdvo_connector)) + else if (IS_LVDS(intel_sdvo_connector)) intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); - if (list_empty(&connector->probed_modes)) - return 0; - return 1; + return !list_empty(&connector->probed_modes); } -static -void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) +static void +intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_device *dev = connector->dev; - if (IS_TV(sdvo_priv)) { - if (sdvo_priv->left_property) - drm_property_destroy(dev, sdvo_priv->left_property); - if (sdvo_priv->right_property) - drm_property_destroy(dev, sdvo_priv->right_property); - if (sdvo_priv->top_property) - drm_property_destroy(dev, 
sdvo_priv->top_property); - if (sdvo_priv->bottom_property) - drm_property_destroy(dev, sdvo_priv->bottom_property); - if (sdvo_priv->hpos_property) - drm_property_destroy(dev, sdvo_priv->hpos_property); - if (sdvo_priv->vpos_property) - drm_property_destroy(dev, sdvo_priv->vpos_property); - if (sdvo_priv->saturation_property) - drm_property_destroy(dev, - sdvo_priv->saturation_property); - if (sdvo_priv->contrast_property) - drm_property_destroy(dev, - sdvo_priv->contrast_property); - if (sdvo_priv->hue_property) - drm_property_destroy(dev, sdvo_priv->hue_property); - } - if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { - if (sdvo_priv->brightness_property) - drm_property_destroy(dev, - sdvo_priv->brightness_property); - } - return; + if (intel_sdvo_connector->left) + drm_property_destroy(dev, intel_sdvo_connector->left); + if (intel_sdvo_connector->right) + drm_property_destroy(dev, intel_sdvo_connector->right); + if (intel_sdvo_connector->top) + drm_property_destroy(dev, intel_sdvo_connector->top); + if (intel_sdvo_connector->bottom) + drm_property_destroy(dev, intel_sdvo_connector->bottom); + if (intel_sdvo_connector->hpos) + drm_property_destroy(dev, intel_sdvo_connector->hpos); + if (intel_sdvo_connector->vpos) + drm_property_destroy(dev, intel_sdvo_connector->vpos); + if (intel_sdvo_connector->saturation) + drm_property_destroy(dev, intel_sdvo_connector->saturation); + if (intel_sdvo_connector->contrast) + drm_property_destroy(dev, intel_sdvo_connector->contrast); + if (intel_sdvo_connector->hue) + drm_property_destroy(dev, intel_sdvo_connector->hue); + if (intel_sdvo_connector->sharpness) + drm_property_destroy(dev, intel_sdvo_connector->sharpness); + if (intel_sdvo_connector->flicker_filter) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); + if (intel_sdvo_connector->flicker_filter_2d) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); + if (intel_sdvo_connector->flicker_filter_adaptive) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); + if (intel_sdvo_connector->tv_luma_filter) + drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); + if (intel_sdvo_connector->tv_chroma_filter) + drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); + if (intel_sdvo_connector->dot_crawl) + drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); + if (intel_sdvo_connector->brightness) + drm_property_destroy(dev, intel_sdvo_connector->brightness); } static void intel_sdvo_destroy(struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - if (sdvo_connector->tv_format_property) + if (intel_sdvo_connector->tv_format) drm_property_destroy(connector->dev, - sdvo_connector->tv_format_property); + intel_sdvo_connector->tv_format); intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); @@ -1870,132 +1775,118 @@ intel_sdvo_set_property(struct drm_connector *connector, uint64_t val) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; - struct drm_crtc *crtc = encoder->crtc; - int 
ret = 0; - bool changed = false; - uint8_t cmd, status; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); uint16_t temp_value; + uint8_t cmd; + int ret; ret = drm_connector_property_set_value(connector, property, val); - if (ret < 0) - goto out; + if (ret) + return ret; + +#define CHECK_PROPERTY(name, NAME) \ + if (intel_sdvo_connector->name == property) { \ + if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ + if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ + cmd = SDVO_CMD_SET_##NAME; \ + intel_sdvo_connector->cur_##name = temp_value; \ + goto set_value; \ + } - if (property == sdvo_connector->tv_format_property) { - if (val >= TV_FORMAT_NUM) { - ret = -EINVAL; - goto out; - } - if (sdvo_priv->tv_format_name == - sdvo_connector->tv_format_supported[val]) - goto out; + if (property == intel_sdvo_connector->tv_format) { + if (val >= TV_FORMAT_NUM) + return -EINVAL; - sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; - changed = true; - } + if (intel_sdvo->tv_format_index == + intel_sdvo_connector->tv_format_supported[val]) + return 0; - if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { - cmd = 0; + intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; + goto done; + } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; - if (sdvo_connector->left_property == property) { + if (intel_sdvo_connector->left == property) { drm_connector_property_set_value(connector, - sdvo_connector->right_property, val); - if (sdvo_connector->left_margin == temp_value) - goto out; - - sdvo_connector->left_margin = temp_value; - sdvo_connector->right_margin = temp_value; - temp_value = sdvo_connector->max_hscan - - sdvo_connector->left_margin; + intel_sdvo_connector->right, val); + if (intel_sdvo_connector->left_margin == temp_value) + return 0; + + intel_sdvo_connector->left_margin = temp_value; + intel_sdvo_connector->right_margin = temp_value; + temp_value = intel_sdvo_connector->max_hscan - + intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; - } else if (sdvo_connector->right_property == property) { + goto set_value; + } else if (intel_sdvo_connector->right == property) { drm_connector_property_set_value(connector, - sdvo_connector->left_property, val); - if (sdvo_connector->right_margin == temp_value) - goto out; - - sdvo_connector->left_margin = temp_value; - sdvo_connector->right_margin = temp_value; - temp_value = sdvo_connector->max_hscan - - sdvo_connector->left_margin; + intel_sdvo_connector->left, val); + if (intel_sdvo_connector->right_margin == temp_value) + return 0; + + intel_sdvo_connector->left_margin = temp_value; + intel_sdvo_connector->right_margin = temp_value; + temp_value = intel_sdvo_connector->max_hscan - + intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; - } else if (sdvo_connector->top_property == property) { + goto set_value; + } else if (intel_sdvo_connector->top == property) { drm_connector_property_set_value(connector, - sdvo_connector->bottom_property, val); - if (sdvo_connector->top_margin == temp_value) - goto out; - - sdvo_connector->top_margin = temp_value; - sdvo_connector->bottom_margin = temp_value; - temp_value = sdvo_connector->max_vscan - - sdvo_connector->top_margin; + intel_sdvo_connector->bottom, val); + if (intel_sdvo_connector->top_margin == temp_value) + return 0; + + intel_sdvo_connector->top_margin = temp_value; + 
intel_sdvo_connector->bottom_margin = temp_value; + temp_value = intel_sdvo_connector->max_vscan - + intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; - } else if (sdvo_connector->bottom_property == property) { + goto set_value; + } else if (intel_sdvo_connector->bottom == property) { drm_connector_property_set_value(connector, - sdvo_connector->top_property, val); - if (sdvo_connector->bottom_margin == temp_value) - goto out; - sdvo_connector->top_margin = temp_value; - sdvo_connector->bottom_margin = temp_value; - temp_value = sdvo_connector->max_vscan - - sdvo_connector->top_margin; + intel_sdvo_connector->top, val); + if (intel_sdvo_connector->bottom_margin == temp_value) + return 0; + + intel_sdvo_connector->top_margin = temp_value; + intel_sdvo_connector->bottom_margin = temp_value; + temp_value = intel_sdvo_connector->max_vscan - + intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; - } else if (sdvo_connector->hpos_property == property) { - if (sdvo_connector->cur_hpos == temp_value) - goto out; - - cmd = SDVO_CMD_SET_POSITION_H; - sdvo_connector->cur_hpos = temp_value; - } else if (sdvo_connector->vpos_property == property) { - if (sdvo_connector->cur_vpos == temp_value) - goto out; - - cmd = SDVO_CMD_SET_POSITION_V; - sdvo_connector->cur_vpos = temp_value; - } else if (sdvo_connector->saturation_property == property) { - if (sdvo_connector->cur_saturation == temp_value) - goto out; - - cmd = SDVO_CMD_SET_SATURATION; - sdvo_connector->cur_saturation = temp_value; - } else if (sdvo_connector->contrast_property == property) { - if (sdvo_connector->cur_contrast == temp_value) - goto out; - - cmd = SDVO_CMD_SET_CONTRAST; - sdvo_connector->cur_contrast = temp_value; - } else if (sdvo_connector->hue_property == property) { - if (sdvo_connector->cur_hue == temp_value) - goto out; - - cmd = SDVO_CMD_SET_HUE; - sdvo_connector->cur_hue = temp_value; - } else if (sdvo_connector->brightness_property == property) { - if (sdvo_connector->cur_brightness == temp_value) - goto out; - - cmd = SDVO_CMD_SET_BRIGHTNESS; - sdvo_connector->cur_brightness = temp_value; - } - if (cmd) { - intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); - status = intel_sdvo_read_response(intel_encoder, - NULL, 0); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO command \n"); - return -EINVAL; - } - changed = true; + goto set_value; } + CHECK_PROPERTY(hpos, HPOS) + CHECK_PROPERTY(vpos, VPOS) + CHECK_PROPERTY(saturation, SATURATION) + CHECK_PROPERTY(contrast, CONTRAST) + CHECK_PROPERTY(hue, HUE) + CHECK_PROPERTY(brightness, BRIGHTNESS) + CHECK_PROPERTY(sharpness, SHARPNESS) + CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) + CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) + CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) + CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) + CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) + CHECK_PROPERTY(dot_crawl, DOT_CRAWL) } - if (changed && crtc) + + return -EINVAL; /* unknown property */ + +set_value: + if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) + return -EIO; + + +done: + if (encoder->crtc) { + struct drm_crtc *crtc = encoder->crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, - crtc->y, crtc->fb); -out: - return ret; + crtc->y, crtc->fb); + } + + return 0; +#undef CHECK_PROPERTY } static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { @@ -2022,28 +1913,57 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs static void 
intel_sdvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); - if (sdvo_priv->analog_ddc_bus) - intel_i2c_destroy(sdvo_priv->analog_ddc_bus); + if (intel_sdvo->analog_ddc_bus) + intel_i2c_destroy(intel_sdvo->analog_ddc_bus); - if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) + if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, - sdvo_priv->sdvo_lvds_fixed_mode); + intel_sdvo->sdvo_lvds_fixed_mode); - drm_encoder_cleanup(encoder); - kfree(intel_encoder); + intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { .destroy = intel_sdvo_enc_destroy, }; +static void +intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) +{ + uint16_t mask = 0; + unsigned int num_bits; + + /* Make a mask of outputs less than or equal to our own priority in the + * list. + */ + switch (sdvo->controlled_output) { + case SDVO_OUTPUT_LVDS1: + mask |= SDVO_OUTPUT_LVDS1; + case SDVO_OUTPUT_LVDS0: + mask |= SDVO_OUTPUT_LVDS0; + case SDVO_OUTPUT_TMDS1: + mask |= SDVO_OUTPUT_TMDS1; + case SDVO_OUTPUT_TMDS0: + mask |= SDVO_OUTPUT_TMDS0; + case SDVO_OUTPUT_RGB1: + mask |= SDVO_OUTPUT_RGB1; + case SDVO_OUTPUT_RGB0: + mask |= SDVO_OUTPUT_RGB0; + break; + } + + /* Count bits to find what number we are in the priority list. */ + mask &= sdvo->caps.output_flags; + num_bits = hweight16(mask); + /* If more than 3 outputs, default to DDC bus 3 for now. */ + if (num_bits > 3) + num_bits = 3; + + /* Corresponds to SDVO_CONTROL_BUS_DDCx */ + sdvo->ddc_bus = 1 << num_bits; +} /** * Choose the appropriate DDC bus for control bus switch command for this @@ -2054,7 +1974,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { */ static void intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, - struct intel_sdvo_priv *sdvo, u32 reg) + struct intel_sdvo *sdvo, u32 reg) { struct sdvo_device_mapping *mapping; @@ -2063,61 +1983,53 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, else mapping = &(dev_priv->sdvo_mappings[1]); - sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); + if (mapping->initialized) + sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); + else + intel_sdvo_guess_ddc_bus(sdvo); } static bool -intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) +intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) { - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; - uint8_t status; - - if (device == 0) - intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); - else - intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); - - intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); - status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); - if (status != SDVO_CMD_STATUS_SUCCESS) - return false; - return true; + return intel_sdvo_set_target_output(intel_sdvo, + device == 0 ? 
SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) && + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, + &intel_sdvo->is_hdmi, 1); } -static struct intel_encoder * -intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) +static struct intel_sdvo * +intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan) { struct drm_device *dev = chan->drm_dev; struct drm_encoder *encoder; - struct intel_encoder *intel_encoder = NULL; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_encoder = enc_to_intel_encoder(encoder); - if (intel_encoder->ddc_bus == &chan->adapter) - break; + struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + if (intel_sdvo->base.ddc_bus == &chan->adapter) + return intel_sdvo; } - return intel_encoder; + + return NULL; } static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { - struct intel_encoder *intel_encoder; - struct intel_sdvo_priv *sdvo_priv; + struct intel_sdvo *intel_sdvo; struct i2c_algo_bit_data *algo_data; const struct i2c_algorithm *algo; algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; - intel_encoder = - intel_sdvo_chan_to_intel_encoder( - (struct intel_i2c_chan *)(algo_data->data)); - if (intel_encoder == NULL) + intel_sdvo = + intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *) + (algo_data->data)); + if (intel_sdvo == NULL) return -EINVAL; - sdvo_priv = intel_encoder->dev_priv; - algo = intel_encoder->i2c_bus->algo; + algo = intel_sdvo->base.i2c_bus->algo; - intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); + intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); return algo->master_xfer(i2c_adap, msgs, num); } @@ -2162,27 +2074,9 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) return 0x72; } -static bool -intel_sdvo_connector_alloc (struct intel_connector **ret) -{ - struct intel_connector *intel_connector; - struct intel_sdvo_connector *sdvo_connector; - - *ret = kzalloc(sizeof(*intel_connector) + - sizeof(*sdvo_connector), GFP_KERNEL); - if (!*ret) - return false; - - intel_connector = *ret; - sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); - intel_connector->dev_priv = sdvo_connector; - - return true; -} - static void -intel_sdvo_connector_create (struct drm_encoder *encoder, - struct drm_connector *connector) +intel_sdvo_connector_init(struct drm_encoder *encoder, + struct drm_connector *connector) { drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, connector->connector_type); @@ -2198,582 +2092,470 @@ intel_sdvo_connector_create (struct drm_encoder *encoder, } static bool -intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) +intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_encoder->enc; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_sdvo->base.enc; struct drm_connector *connector; struct intel_connector *intel_connector; - struct intel_sdvo_connector *sdvo_connector; + struct intel_sdvo_connector *intel_sdvo_connector; - if (!intel_sdvo_connector_alloc(&intel_connector)) + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) return false; - sdvo_connector = intel_connector->dev_priv; - if (device == 0) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; - sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; + intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + 
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; } else if (device == 1) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; - sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; + intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; } + intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) - && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) - && sdvo_priv->is_hdmi) { + if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode) + && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device) + && intel_sdvo->is_hdmi) { /* enable hdmi encoding mode if supported */ - intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); - intel_sdvo_set_colorimetry(intel_encoder, + intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; } - intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); - intel_sdvo_connector_create(encoder, connector); + intel_sdvo_connector_init(encoder, connector); return true; } static bool -intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) +intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) { - struct drm_encoder *encoder = &intel_encoder->enc; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_sdvo->base.enc; struct drm_connector *connector; struct intel_connector *intel_connector; - struct intel_sdvo_connector *sdvo_connector; + struct intel_sdvo_connector *intel_sdvo_connector; - if (!intel_sdvo_connector_alloc(&intel_connector)) - return false; + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_connector = intel_connector->dev_priv; - sdvo_priv->controlled_output |= type; - sdvo_connector->output_flag = type; + intel_sdvo->controlled_output |= type; + intel_sdvo_connector->output_flag = type; - sdvo_priv->is_tv = true; - intel_encoder->needs_tv_clock = true; - intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; + intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - intel_sdvo_connector_create(encoder, connector); + intel_sdvo_connector_init(encoder, connector); - intel_sdvo_tv_create_property(connector, type); + if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) + goto err; - intel_sdvo_create_enhance_property(connector); + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + goto err; return true; + +err: + intel_sdvo_destroy_enhance_property(connector); + kfree(intel_sdvo_connector); + return false; } static bool -intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) +intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = 
&intel_encoder->enc; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_sdvo->base.enc; struct drm_connector *connector; struct intel_connector *intel_connector; - struct intel_sdvo_connector *sdvo_connector; + struct intel_sdvo_connector *intel_sdvo_connector; - if (!intel_sdvo_connector_alloc(&intel_connector)) - return false; + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; connector->polled = DRM_CONNECTOR_POLL_CONNECT; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - sdvo_connector = intel_connector->dev_priv; if (device == 0) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; - sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; } else if (device == 1) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; - sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); - intel_sdvo_connector_create(encoder, connector); + intel_sdvo_connector_init(encoder, connector); return true; } static bool -intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) +intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_encoder->enc; - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_sdvo->base.enc; struct drm_connector *connector; struct intel_connector *intel_connector; - struct intel_sdvo_connector *sdvo_connector; + struct intel_sdvo_connector *intel_sdvo_connector; - if (!intel_sdvo_connector_alloc(&intel_connector)) - return false; + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; - connector = &intel_connector->base; + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - sdvo_connector = intel_connector->dev_priv; - - sdvo_priv->is_lvds = true; if (device == 0) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; - sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; } else if (device == 1) { - sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; - sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); + intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT)); - intel_sdvo_connector_create(encoder, connector); - intel_sdvo_create_enhance_property(connector); - return true; + intel_sdvo_connector_init(encoder, connector); + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + goto err; + + return true; + +err: + 
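/*
 * Illustrative sketch, not part of the patch: the reworked *_init() helpers
 * above allocate the connector with kzalloc() and unwind through a single
 * "err:" label when a later step fails, instead of returning true
 * unconditionally. The general shape of that idiom, with hypothetical
 * stand-in helpers:
 */
#include <stdlib.h>

struct connector { int flags; };

static int  register_connector(struct connector *c) { return c != NULL; }
static int  create_properties(struct connector *c)  { (void)c; return 1; }
static void destroy_properties(struct connector *c) { (void)c; }

static struct connector *connector_init(void)
{
    struct connector *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;

    if (!register_connector(c))
        goto err;
    if (!create_properties(c))
        goto err;

    return c;

err:
    destroy_properties(c);  /* plays the role of the destroy-enhance-property call */
    free(c);
    return NULL;
}

int main(void)
{
    struct connector *c = connector_init();
    free(c);                /* free(NULL) is a no-op */
    return c ? 0 : 1;
}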
intel_sdvo_destroy_enhance_property(connector); + kfree(intel_sdvo_connector); + return false; } static bool -intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) { - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - - sdvo_priv->is_tv = false; - intel_encoder->needs_tv_clock = false; - sdvo_priv->is_lvds = false; + intel_sdvo->is_tv = false; + intel_sdvo->base.needs_tv_clock = false; + intel_sdvo->is_lvds = false; /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ if (flags & SDVO_OUTPUT_TMDS0) - if (!intel_sdvo_dvi_init(intel_encoder, 0)) + if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) - if (!intel_sdvo_dvi_init(intel_encoder, 1)) + if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; /* TV has no XXX1 function block */ if (flags & SDVO_OUTPUT_SVID0) - if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0)) return false; if (flags & SDVO_OUTPUT_CVBS0) - if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) return false; if (flags & SDVO_OUTPUT_RGB0) - if (!intel_sdvo_analog_init(intel_encoder, 0)) + if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) - if (!intel_sdvo_analog_init(intel_encoder, 1)) + if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; if (flags & SDVO_OUTPUT_LVDS0) - if (!intel_sdvo_lvds_init(intel_encoder, 0)) + if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) - if (!intel_sdvo_lvds_init(intel_encoder, 1)) + if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; if ((flags & SDVO_OUTPUT_MASK) == 0) { unsigned char bytes[2]; - sdvo_priv->controlled_output = 0; - memcpy(bytes, &sdvo_priv->caps.output_flags, 2); + intel_sdvo->controlled_output = 0; + memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", - SDVO_NAME(sdvo_priv), + SDVO_NAME(intel_sdvo), bytes[0], bytes[1]); return false; } - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1); return true; } -static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) +static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + int type) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; + struct drm_device *dev = intel_sdvo->base.enc.dev; struct intel_sdvo_tv_format format; uint32_t format_map, i; - uint8_t status; - intel_sdvo_set_target_output(intel_encoder, type); + if (!intel_sdvo_set_target_output(intel_sdvo, type)) + return false; - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &format, sizeof(format)); - if (status != SDVO_CMD_STATUS_SUCCESS) - return; + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPPORTED_TV_FORMATS, + &format, sizeof(format))) + return false; - memcpy(&format_map, &format, sizeof(format) 
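/*
 * Illustrative sketch, not part of the patch: the hunk here replaces the
 * open-coded "a > b ? b : a" copy length with min(), so at most
 * sizeof(format_map) bytes of the reply are copied into the bitmask. A
 * stand-alone equivalent (simplified min(); the kernel macro also
 * type-checks its arguments, and the reply layout below is hypothetical):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

struct tv_format_reply { uint8_t bits[6]; };    /* 6 bytes of format flags */

int main(void)
{
    struct tv_format_reply reply = { { 0x03, 0, 0, 0, 0, 0 } };
    uint32_t format_map = 0;

    memcpy(&format_map, &reply, min(sizeof(format_map), sizeof(reply)));
    printf("format_map = 0x%08x\n", (unsigned)format_map);
    return 0;
}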
> sizeof(format_map) ? - sizeof(format_map) : sizeof(format)); + memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); if (format_map == 0) - return; + return false; - sdvo_connector->format_supported_num = 0; + intel_sdvo_connector->format_supported_num = 0; for (i = 0 ; i < TV_FORMAT_NUM; i++) - if (format_map & (1 << i)) { - sdvo_connector->tv_format_supported - [sdvo_connector->format_supported_num++] = - tv_format_names[i]; - } + if (format_map & (1 << i)) + intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; - sdvo_connector->tv_format_property = - drm_property_create( - connector->dev, DRM_MODE_PROP_ENUM, - "mode", sdvo_connector->format_supported_num); + intel_sdvo_connector->tv_format = + drm_property_create(dev, DRM_MODE_PROP_ENUM, + "mode", intel_sdvo_connector->format_supported_num); + if (!intel_sdvo_connector->tv_format) + return false; - for (i = 0; i < sdvo_connector->format_supported_num; i++) + for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) drm_property_add_enum( - sdvo_connector->tv_format_property, i, - i, sdvo_connector->tv_format_supported[i]); + intel_sdvo_connector->tv_format, i, + i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); - sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; - drm_connector_attach_property( - connector, sdvo_connector->tv_format_property, 0); + intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; + drm_connector_attach_property(&intel_sdvo_connector->base.base, + intel_sdvo_connector->tv_format, 0); + return true; } -static void intel_sdvo_create_enhance_property(struct drm_connector *connector) +#define ENHANCEMENT(name, NAME) do { \ + if (enhancements.name) { \ + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ + !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ + return false; \ + intel_sdvo_connector->max_##name = data_value[0]; \ + intel_sdvo_connector->cur_##name = response; \ + intel_sdvo_connector->name = \ + drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \ + if (!intel_sdvo_connector->name) return false; \ + intel_sdvo_connector->name->values[0] = 0; \ + intel_sdvo_connector->name->values[1] = data_value[0]; \ + drm_connector_attach_property(connector, \ + intel_sdvo_connector->name, \ + intel_sdvo_connector->cur_##name); \ + DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ + data_value[0], data_value[1], response); \ + } \ +} while(0) + +static bool +intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + struct intel_sdvo_enhancements_reply enhancements) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; - struct intel_sdvo_enhancements_reply sdvo_data; - struct drm_device *dev = connector->dev; - uint8_t status; + struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; - intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, - NULL, 0); - status = intel_sdvo_read_response(intel_encoder, &sdvo_data, - sizeof(sdvo_data)); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS(" incorrect response is 
returned\n"); - return; + /* when horizontal overscan is supported, Add the left/right property */ + if (enhancements.overscan_h) { + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_MAX_OVERSCAN_H, + &data_value, 4)) + return false; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_OVERSCAN_H, + &response, 2)) + return false; + + intel_sdvo_connector->max_hscan = data_value[0]; + intel_sdvo_connector->left_margin = data_value[0] - response; + intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; + intel_sdvo_connector->left = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "left_margin", 2); + if (!intel_sdvo_connector->left) + return false; + + intel_sdvo_connector->left->values[0] = 0; + intel_sdvo_connector->left->values[1] = data_value[0]; + drm_connector_attach_property(connector, + intel_sdvo_connector->left, + intel_sdvo_connector->left_margin); + + intel_sdvo_connector->right = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "right_margin", 2); + if (!intel_sdvo_connector->right) + return false; + + intel_sdvo_connector->right->values[0] = 0; + intel_sdvo_connector->right->values[1] = data_value[0]; + drm_connector_attach_property(connector, + intel_sdvo_connector->right, + intel_sdvo_connector->right_margin); + DRM_DEBUG_KMS("h_overscan: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); } - response = *((uint16_t *)&sdvo_data); - if (!response) { - DRM_DEBUG_KMS("No enhancement is supported\n"); - return; + + if (enhancements.overscan_v) { + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_MAX_OVERSCAN_V, + &data_value, 4)) + return false; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_OVERSCAN_V, + &response, 2)) + return false; + + intel_sdvo_connector->max_vscan = data_value[0]; + intel_sdvo_connector->top_margin = data_value[0] - response; + intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; + intel_sdvo_connector->top = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "top_margin", 2); + if (!intel_sdvo_connector->top) + return false; + + intel_sdvo_connector->top->values[0] = 0; + intel_sdvo_connector->top->values[1] = data_value[0]; + drm_connector_attach_property(connector, + intel_sdvo_connector->top, + intel_sdvo_connector->top_margin); + + intel_sdvo_connector->bottom = + drm_property_create(dev, DRM_MODE_PROP_RANGE, + "bottom_margin", 2); + if (!intel_sdvo_connector->bottom) + return false; + + intel_sdvo_connector->bottom->values[0] = 0; + intel_sdvo_connector->bottom->values[1] = data_value[0]; + drm_connector_attach_property(connector, + intel_sdvo_connector->bottom, + intel_sdvo_connector->bottom_margin); + DRM_DEBUG_KMS("v_overscan: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); } - if (IS_TV(sdvo_priv)) { - /* when horizontal overscan is supported, Add the left/right - * property - */ - if (sdvo_data.overscan_h) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO max " - "h_overscan\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); - return; - } - sdvo_priv->max_hscan = data_value[0]; - sdvo_priv->left_margin = data_value[0] - response; - 
sdvo_priv->right_margin = sdvo_priv->left_margin; - sdvo_priv->left_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "left_margin", 2); - sdvo_priv->left_property->values[0] = 0; - sdvo_priv->left_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->left_property, - sdvo_priv->left_margin); - sdvo_priv->right_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "right_margin", 2); - sdvo_priv->right_property->values[0] = 0; - sdvo_priv->right_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->right_property, - sdvo_priv->right_margin); - DRM_DEBUG_KMS("h_overscan: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.overscan_v) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO max " - "v_overscan\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); - return; - } - sdvo_priv->max_vscan = data_value[0]; - sdvo_priv->top_margin = data_value[0] - response; - sdvo_priv->bottom_margin = sdvo_priv->top_margin; - sdvo_priv->top_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "top_margin", 2); - sdvo_priv->top_property->values[0] = 0; - sdvo_priv->top_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->top_property, - sdvo_priv->top_margin); - sdvo_priv->bottom_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "bottom_margin", 2); - sdvo_priv->bottom_property->values[0] = 0; - sdvo_priv->bottom_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->bottom_property, - sdvo_priv->bottom_margin); - DRM_DEBUG_KMS("v_overscan: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.position_h) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); - return; - } - sdvo_priv->max_hpos = data_value[0]; - sdvo_priv->cur_hpos = response; - sdvo_priv->hpos_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "hpos", 2); - sdvo_priv->hpos_property->values[0] = 0; - sdvo_priv->hpos_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->hpos_property, - sdvo_priv->cur_hpos); - DRM_DEBUG_KMS("h_position: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.position_v) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_POSITION_V, NULL, 0); - status = 
intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); - return; - } - sdvo_priv->max_vpos = data_value[0]; - sdvo_priv->cur_vpos = response; - sdvo_priv->vpos_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "vpos", 2); - sdvo_priv->vpos_property->values[0] = 0; - sdvo_priv->vpos_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->vpos_property, - sdvo_priv->cur_vpos); - DRM_DEBUG_KMS("v_position: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.saturation) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); - return; - } - sdvo_priv->max_saturation = data_value[0]; - sdvo_priv->cur_saturation = response; - sdvo_priv->saturation_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "saturation", 2); - sdvo_priv->saturation_property->values[0] = 0; - sdvo_priv->saturation_property->values[1] = - data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->saturation_property, - sdvo_priv->cur_saturation); - DRM_DEBUG_KMS("saturation: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.contrast) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); - return; - } - sdvo_priv->max_contrast = data_value[0]; - sdvo_priv->cur_contrast = response; - sdvo_priv->contrast_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "contrast", 2); - sdvo_priv->contrast_property->values[0] = 0; - sdvo_priv->contrast_property->values[1] = data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->contrast_property, - sdvo_priv->cur_contrast); - DRM_DEBUG_KMS("contrast: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } - if (sdvo_data.hue) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); - return; - } - sdvo_priv->max_hue = data_value[0]; - sdvo_priv->cur_hue = response; - sdvo_priv->hue_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "hue", 2); - sdvo_priv->hue_property->values[0] = 0; - sdvo_priv->hue_property->values[1] = - data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->hue_property, - 
sdvo_priv->cur_hue); - DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", - data_value[0], data_value[1], response); - } + + ENHANCEMENT(hpos, HPOS); + ENHANCEMENT(vpos, VPOS); + ENHANCEMENT(saturation, SATURATION); + ENHANCEMENT(contrast, CONTRAST); + ENHANCEMENT(hue, HUE); + ENHANCEMENT(sharpness, SHARPNESS); + ENHANCEMENT(brightness, BRIGHTNESS); + ENHANCEMENT(flicker_filter, FLICKER_FILTER); + ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); + ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); + ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); + ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); + + if (enhancements.dot_crawl) { + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) + return false; + + intel_sdvo_connector->max_dot_crawl = 1; + intel_sdvo_connector->cur_dot_crawl = response & 0x1; + intel_sdvo_connector->dot_crawl = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2); + if (!intel_sdvo_connector->dot_crawl) + return false; + + intel_sdvo_connector->dot_crawl->values[0] = 0; + intel_sdvo_connector->dot_crawl->values[1] = 1; + drm_connector_attach_property(connector, + intel_sdvo_connector->dot_crawl, + intel_sdvo_connector->cur_dot_crawl); + DRM_DEBUG_KMS("dot crawl: current %d\n", response); } - if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { - if (sdvo_data.brightness) { - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &data_value, 4); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); - return; - } - intel_sdvo_write_cmd(intel_encoder, - SDVO_CMD_GET_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_encoder, - &response, 2); - if (status != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); - return; - } - sdvo_priv->max_brightness = data_value[0]; - sdvo_priv->cur_brightness = response; - sdvo_priv->brightness_property = - drm_property_create(dev, DRM_MODE_PROP_RANGE, - "brightness", 2); - sdvo_priv->brightness_property->values[0] = 0; - sdvo_priv->brightness_property->values[1] = - data_value[0]; - drm_connector_attach_property(connector, - sdvo_priv->brightness_property, - sdvo_priv->cur_brightness); - DRM_DEBUG_KMS("brightness: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); - } + + return true; +} + +static bool +intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + struct intel_sdvo_enhancements_reply enhancements) +{ + struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_connector *connector = &intel_sdvo_connector->base.base; + uint16_t response, data_value[2]; + + ENHANCEMENT(brightness, BRIGHTNESS); + + return true; +} +#undef ENHANCEMENT + +static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector) +{ + union { + struct intel_sdvo_enhancements_reply reply; + uint16_t response; + } enhancements; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + &enhancements, sizeof(enhancements))) + return false; + + if (enhancements.response == 0) { + DRM_DEBUG_KMS("No enhancement is supported\n"); + return true; } - return; + + if (IS_TV(intel_sdvo_connector)) + return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); + else if(IS_LVDS(intel_sdvo_connector)) + return 
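/*
 * Illustrative sketch, not part of the patch: the ENHANCEMENT(name, NAME)
 * macro above uses token pasting (##) and stringification (#) so a single
 * template expands into the max/current query, the property bookkeeping and
 * the debug message for every picture control. A self-contained cut-down
 * version of the same preprocessor trick (all names hypothetical):
 */
#include <stdio.h>

struct controls { int has_hue, has_contrast; };
struct state    { int max_hue, cur_hue, max_contrast, cur_contrast; };

/* stand-ins for the hardware queries (SDVO commands in the driver) */
static int get_max(const char *what) { (void)what; return 63; }
static int get_cur(const char *what) { (void)what; return 32; }

#define ENHANCEMENT(name) do {                                  \
    if (caps->has_##name) {                                     \
        st->max_##name = get_max(#name);                        \
        st->cur_##name = get_cur(#name);                        \
        printf(#name ": max %d, current %d\n",                  \
               st->max_##name, st->cur_##name);                 \
    }                                                           \
} while (0)

static void setup(const struct controls *caps, struct state *st)
{
    ENHANCEMENT(hue);
    ENHANCEMENT(contrast);
}
#undef ENHANCEMENT

int main(void)
{
    struct controls caps = { .has_hue = 1, .has_contrast = 1 };
    struct state st = { 0 };

    setup(&caps, &st);
    return 0;
}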
intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); + else + return true; + } bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; - struct intel_sdvo_priv *sdvo_priv; + struct intel_sdvo *intel_sdvo; u8 ch[0x40]; int i; u32 i2c_reg, ddc_reg, analog_ddc_reg; - intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); - if (!intel_encoder) { + intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); + if (!intel_sdvo) return false; - } - sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); - sdvo_priv->sdvo_reg = sdvo_reg; + intel_sdvo->sdvo_reg = sdvo_reg; - intel_encoder->dev_priv = sdvo_priv; + intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; if (HAS_PCH_SPLIT(dev)) { @@ -2795,14 +2577,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) if (!intel_encoder->i2c_bus) goto err_inteloutput; - sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); /* Save the bit-banging i2c functionality for use by the DDC wrapper */ intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { + if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); goto err_i2c; @@ -2812,17 +2594,16 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* setup the DDC bus. */ if (IS_SDVOB(sdvo_reg)) { intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, "SDVOB/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); - sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, + intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, "SDVOC/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } - - if (intel_encoder->ddc_bus == NULL) + if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) goto err_i2c; /* Wrap with our custom algo which switches to DDC mode */ @@ -2833,53 +2614,56 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ - intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); + if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) + goto err_enc; - if (intel_sdvo_output_setup(intel_encoder, - sdvo_priv->caps.output_flags) != true) { + if (intel_sdvo_output_setup(intel_sdvo, + intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_i2c; + goto err_enc; } - intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); + intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. 
*/ - intel_sdvo_set_target_input(intel_encoder, true, false); - - intel_sdvo_get_input_pixel_clock_range(intel_encoder, - &sdvo_priv->pixel_clock_min, - &sdvo_priv->pixel_clock_max); + if (!intel_sdvo_set_target_input(intel_sdvo)) + goto err_enc; + if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, + &intel_sdvo->pixel_clock_min, + &intel_sdvo->pixel_clock_max)) + goto err_enc; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", - SDVO_NAME(sdvo_priv), - sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, - sdvo_priv->caps.device_rev_id, - sdvo_priv->pixel_clock_min / 1000, - sdvo_priv->pixel_clock_max / 1000, - (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', - (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', + SDVO_NAME(intel_sdvo), + intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, + intel_sdvo->caps.device_rev_id, + intel_sdvo->pixel_clock_min / 1000, + intel_sdvo->pixel_clock_max / 1000, + (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', + (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', /* check currently supported outputs */ - sdvo_priv->caps.output_flags & + intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', - sdvo_priv->caps.output_flags & + intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); - return true; +err_enc: + drm_encoder_cleanup(&intel_encoder->enc); err_i2c: - if (sdvo_priv->analog_ddc_bus != NULL) - intel_i2c_destroy(sdvo_priv->analog_ddc_bus); + if (intel_sdvo->analog_ddc_bus != NULL) + intel_i2c_destroy(intel_sdvo->analog_ddc_bus); if (intel_encoder->ddc_bus != NULL) intel_i2c_destroy(intel_encoder->ddc_bus); if (intel_encoder->i2c_bus != NULL) intel_i2c_destroy(intel_encoder->i2c_bus); err_inteloutput: - kfree(intel_encoder); + kfree(intel_sdvo); return false; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index ba5cdf8ae40..a386b022e53 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args { # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 -/** 5 bytes of bit flags for TV formats shared by all TV format functions */ +/** 6 bytes of bit flags for TV formats shared by all TV format functions */ struct intel_sdvo_tv_format { unsigned int ntsc_m:1; unsigned int ntsc_j:1; @@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply { unsigned int overscan_h:1; unsigned int overscan_v:1; - unsigned int position_h:1; - unsigned int position_v:1; + unsigned int hpos:1; + unsigned int vpos:1; unsigned int sharpness:1; unsigned int dot_crawl:1; unsigned int dither:1; - unsigned int max_tv_chroma_filter:1; - unsigned int max_tv_luma_filter:1; + unsigned int tv_chroma_filter:1; + unsigned int tv_luma_filter:1; } __attribute__((packed)); /* Picture enhancement limits below are dependent on the current TV format, * and thus need to be queried and set after it. 
*/ -#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d -#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b -#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52 +#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d +#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b +#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52 #define SDVO_CMD_GET_MAX_SATURATION 0x55 #define SDVO_CMD_GET_MAX_HUE 0x58 #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b #define SDVO_CMD_GET_MAX_CONTRAST 0x5e #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 -#define SDVO_CMD_GET_MAX_POSITION_H 0x67 -#define SDVO_CMD_GET_MAX_POSITION_V 0x6a -#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d -#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74 -#define SDVO_CMD_GET_MAX_TV_LUMA 0x77 +#define SDVO_CMD_GET_MAX_HPOS 0x67 +#define SDVO_CMD_GET_MAX_VPOS 0x6a +#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d +#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 +#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 struct intel_sdvo_enhancement_limits_reply { u16 max_value; u16 default_value; @@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply { #define SDVO_CMD_GET_FLICKER_FILTER 0x4e #define SDVO_CMD_SET_FLICKER_FILTER 0x4f -#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50 -#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51 -#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53 -#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54 +#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50 +#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51 +#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53 +#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54 #define SDVO_CMD_GET_SATURATION 0x56 #define SDVO_CMD_SET_SATURATION 0x57 #define SDVO_CMD_GET_HUE 0x59 @@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply { #define SDVO_CMD_SET_OVERSCAN_H 0x63 #define SDVO_CMD_GET_OVERSCAN_V 0x65 #define SDVO_CMD_SET_OVERSCAN_V 0x66 -#define SDVO_CMD_GET_POSITION_H 0x68 -#define SDVO_CMD_SET_POSITION_H 0x69 -#define SDVO_CMD_GET_POSITION_V 0x6b -#define SDVO_CMD_SET_POSITION_V 0x6c +#define SDVO_CMD_GET_HPOS 0x68 +#define SDVO_CMD_SET_HPOS 0x69 +#define SDVO_CMD_GET_VPOS 0x6b +#define SDVO_CMD_SET_VPOS 0x6c #define SDVO_CMD_GET_SHARPNESS 0x6e #define SDVO_CMD_SET_SHARPNESS 0x6f -#define SDVO_CMD_GET_TV_CHROMA 0x75 -#define SDVO_CMD_SET_TV_CHROMA 0x76 -#define SDVO_CMD_GET_TV_LUMA 0x78 -#define SDVO_CMD_SET_TV_LUMA 0x79 +#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75 +#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76 +#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 +#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 struct intel_sdvo_enhancements_arg { u16 value; }__attribute__((packed)); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index cc3726a4a1c..c671f60ce80 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -44,7 +44,9 @@ enum tv_margin { }; /** Private structure for the integrated TV support */ -struct intel_tv_priv { +struct intel_tv { + struct intel_encoder base; + int type; char *tv_format; int margin[4]; @@ -896,6 +898,11 @@ static const struct tv_mode tv_modes[] = { }, }; +static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) +{ + return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base); +} + static void intel_tv_dpms(struct drm_encoder *encoder, int mode) { @@ -929,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format) } static const struct tv_mode * -intel_tv_mode_find (struct intel_encoder *intel_encoder) +intel_tv_mode_find (struct intel_tv *intel_tv) { - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; - - return 
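/*
 * Illustrative sketch, not part of the patch: intel_tv (like intel_sdvo
 * earlier in the diff) now embeds intel_encoder as its first "base" member
 * and recovers the outer object with container_of(), replacing the old
 * dev_priv side pointer. A minimal user-space rendering of that embedding
 * idiom (simplified container_of(); the kernel version also type-checks,
 * and the structure names here are hypothetical stand-ins):
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder {                /* stands in for struct intel_encoder */
    int type;
};

struct tv_encoder {             /* stands in for struct intel_tv */
    struct encoder base;        /* embedded, not pointed to */
    int margin[4];
};

static struct tv_encoder *to_tv(struct encoder *enc)
{
    return container_of(enc, struct tv_encoder, base);
}

int main(void)
{
    struct tv_encoder tv = {
        .base   = { .type = 1 },
        .margin = { 54, 36, 46, 37 },   /* the BIOS margins used above */
    };
    struct encoder *enc = &tv.base;     /* what generic code passes around */

    printf("left margin = %d\n", to_tv(enc)->margin[0]);
    return 0;
}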
intel_tv_mode_lookup(tv_priv->tv_format); + return intel_tv_mode_lookup(intel_tv->tv_format); } static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) @@ -957,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct drm_device *dev = encoder->dev; struct drm_mode_config *drm_config = &dev->mode_config; - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); struct drm_encoder *other_encoder; if (!tv_mode) @@ -983,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); u32 tv_ctl; u32 hctl1, hctl2, hctl3; u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; @@ -1001,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, tv_ctl = I915_READ(TV_CTL); tv_ctl &= TV_CTL_SAVE; - switch (tv_priv->type) { + switch (intel_tv->type) { default: case DRM_MODE_CONNECTOR_Unknown: case DRM_MODE_CONNECTOR_Composite: @@ -1154,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, /* Wait for vblank for the disable to take effect */ if (!IS_I9XX(dev)) - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, intel_crtc->pipe); I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank(dev); + intel_wait_for_vblank(dev, intel_crtc->pipe); /* Filter ctl must be set before TV_WIN_SIZE */ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); @@ -1168,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, else ysize = 2*tv_mode->nbr_end + 1; - xpos += tv_priv->margin[TV_MARGIN_LEFT]; - ypos += tv_priv->margin[TV_MARGIN_TOP]; - xsize -= (tv_priv->margin[TV_MARGIN_LEFT] + - tv_priv->margin[TV_MARGIN_RIGHT]); - ysize -= (tv_priv->margin[TV_MARGIN_TOP] + - tv_priv->margin[TV_MARGIN_BOTTOM]); + xpos += intel_tv->margin[TV_MARGIN_LEFT]; + ypos += intel_tv->margin[TV_MARGIN_TOP]; + xsize -= (intel_tv->margin[TV_MARGIN_LEFT] + + intel_tv->margin[TV_MARGIN_RIGHT]); + ysize -= (intel_tv->margin[TV_MARGIN_TOP] + + intel_tv->margin[TV_MARGIN_BOTTOM]); I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); @@ -1222,9 +1226,9 @@ static const struct drm_display_mode reported_modes[] = { * \return false if TV is disconnected. 
*/ static int -intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) +intel_tv_detect_type (struct intel_tv *intel_tv) { - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_tv->base.enc; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -1263,11 +1267,15 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder DAC_C_0_7_V); I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_DAC, tv_dac); - intel_wait_for_vblank(dev); + POSTING_READ(TV_DAC); + msleep(20); + tv_dac = I915_READ(TV_DAC); I915_WRITE(TV_DAC, save_tv_dac); I915_WRITE(TV_CTL, save_tv_ctl); - intel_wait_for_vblank(dev); + POSTING_READ(TV_CTL); + msleep(20); + /* * A B C * 0 1 1 Composite @@ -1304,12 +1312,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder static void intel_tv_find_better_format(struct drm_connector *connector) { struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int i; - if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == + if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == tv_mode->component_only) return; @@ -1317,12 +1324,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector) for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { tv_mode = tv_modes + i; - if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == + if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == tv_mode->component_only) break; } - tv_priv->tv_format = tv_mode->name; + intel_tv->tv_format = tv_mode->name; drm_connector_property_set_value(connector, connector->dev->mode_config.tv_mode_property, i); } @@ -1336,31 +1343,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector) static enum drm_connector_status intel_tv_detect(struct drm_connector *connector) { - struct drm_crtc *crtc; struct drm_display_mode mode; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; - int dpms_mode; - int type = tv_priv->type; + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + int type; mode = reported_modes[0]; drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); if (encoder->crtc && encoder->crtc->enabled) { - type = intel_tv_detect_type(encoder->crtc, intel_encoder); + type = intel_tv_detect_type(intel_tv); } else { - crtc = intel_get_load_detect_pipe(intel_encoder, connector, + struct drm_crtc *crtc; + int dpms_mode; + + crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, &mode, &dpms_mode); if (crtc) { - type = intel_tv_detect_type(crtc, intel_encoder); - intel_release_load_detect_pipe(intel_encoder, connector, + type = intel_tv_detect_type(intel_tv); + intel_release_load_detect_pipe(&intel_tv->base, connector, dpms_mode); } else type = -1; } - tv_priv->type = type; + intel_tv->type = type; if (type < 0) return connector_status_disconnected; @@ -1391,8 +1398,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { struct drm_encoder *encoder = intel_attached_encoder(connector); - 
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; @@ -1417,8 +1424,8 @@ intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int j, count = 0; u64 tmp; @@ -1483,8 +1490,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop { struct drm_device *dev = connector->dev; struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; @@ -1494,30 +1500,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop goto out; if (property == dev->mode_config.tv_left_margin_property && - tv_priv->margin[TV_MARGIN_LEFT] != val) { - tv_priv->margin[TV_MARGIN_LEFT] = val; + intel_tv->margin[TV_MARGIN_LEFT] != val) { + intel_tv->margin[TV_MARGIN_LEFT] = val; changed = true; } else if (property == dev->mode_config.tv_right_margin_property && - tv_priv->margin[TV_MARGIN_RIGHT] != val) { - tv_priv->margin[TV_MARGIN_RIGHT] = val; + intel_tv->margin[TV_MARGIN_RIGHT] != val) { + intel_tv->margin[TV_MARGIN_RIGHT] = val; changed = true; } else if (property == dev->mode_config.tv_top_margin_property && - tv_priv->margin[TV_MARGIN_TOP] != val) { - tv_priv->margin[TV_MARGIN_TOP] = val; + intel_tv->margin[TV_MARGIN_TOP] != val) { + intel_tv->margin[TV_MARGIN_TOP] = val; changed = true; } else if (property == dev->mode_config.tv_bottom_margin_property && - tv_priv->margin[TV_MARGIN_BOTTOM] != val) { - tv_priv->margin[TV_MARGIN_BOTTOM] = val; + intel_tv->margin[TV_MARGIN_BOTTOM] != val) { + intel_tv->margin[TV_MARGIN_BOTTOM] = val; changed = true; } else if (property == dev->mode_config.tv_mode_property) { if (val >= ARRAY_SIZE(tv_modes)) { ret = -EINVAL; goto out; } - if (!strcmp(tv_priv->tv_format, tv_modes[val].name)) + if (!strcmp(intel_tv->tv_format, tv_modes[val].name)) goto out; - tv_priv->tv_format = tv_modes[val].name; + intel_tv->tv_format = tv_modes[val].name; changed = true; } else { ret = -EINVAL; @@ -1553,16 +1559,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = .best_encoder = intel_attached_encoder, }; -static void intel_tv_enc_destroy(struct drm_encoder *encoder) -{ - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(intel_encoder); -} - static const struct drm_encoder_funcs intel_tv_enc_funcs = { - .destroy = intel_tv_enc_destroy, + .destroy = intel_encoder_destroy, }; /* @@ -1606,9 +1604,9 @@ intel_tv_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; + struct intel_tv *intel_tv; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; - struct 
intel_tv_priv *tv_priv; u32 tv_dac_on, tv_dac_off, save_tv_dac; char **tv_format_names; int i, initial_mode = 0; @@ -1647,18 +1645,18 @@ intel_tv_init(struct drm_device *dev) (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; - intel_encoder = kzalloc(sizeof(struct intel_encoder) + - sizeof(struct intel_tv_priv), GFP_KERNEL); - if (!intel_encoder) { + intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); + if (!intel_tv) { return; } intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(intel_tv); return; } + intel_encoder = &intel_tv->base; connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_tv_connector_funcs, @@ -1668,22 +1666,20 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); - tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); - intel_encoder->dev_priv = tv_priv; - tv_priv->type = DRM_MODE_CONNECTOR_Unknown; + intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ - tv_priv->margin[TV_MARGIN_LEFT] = 54; - tv_priv->margin[TV_MARGIN_TOP] = 36; - tv_priv->margin[TV_MARGIN_RIGHT] = 46; - tv_priv->margin[TV_MARGIN_BOTTOM] = 37; + intel_tv->margin[TV_MARGIN_LEFT] = 54; + intel_tv->margin[TV_MARGIN_TOP] = 36; + intel_tv->margin[TV_MARGIN_RIGHT] = 46; + intel_tv->margin[TV_MARGIN_BOTTOM] = 37; - tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); + intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); @@ -1703,16 +1699,16 @@ intel_tv_init(struct drm_device *dev) initial_mode); drm_connector_attach_property(connector, dev->mode_config.tv_left_margin_property, - tv_priv->margin[TV_MARGIN_LEFT]); + intel_tv->margin[TV_MARGIN_LEFT]); drm_connector_attach_property(connector, dev->mode_config.tv_top_margin_property, - tv_priv->margin[TV_MARGIN_TOP]); + intel_tv->margin[TV_MARGIN_TOP]); drm_connector_attach_property(connector, dev->mode_config.tv_right_margin_property, - tv_priv->margin[TV_MARGIN_RIGHT]); + intel_tv->margin[TV_MARGIN_RIGHT]); drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, - tv_priv->margin[TV_MARGIN_BOTTOM]); + intel_tv->margin[TV_MARGIN_BOTTOM]); out: drm_sysfs_connector_add(connector); } diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index fff82045c42..9ce2827f8c0 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -1085,19 +1085,19 @@ file_priv) } struct drm_ioctl_desc mga_ioctls[] = { - DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), - DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), + DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 0b69a9628c9..974b0f8ae04 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb, uint32_t val = 0; if (off < pci_resource_len(dev->pdev, 1)) { - uint32_t __iomem *p = + uint8_t __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); val = ioread32(p + (off & ~PAGE_MASK)); @@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb, uint32_t off, uint32_t val) { if (off < pci_resource_len(dev->pdev, 1)) { - uint32_t __iomem *p = + uint8_t __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); iowrite32(val, p + (off & ~PAGE_MASK)); @@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr } #ifdef __powerpc__ /* Powerbook specific quirks */ - if ((dev->pci_device & 0xffff) == 0x0179 || - (dev->pci_device & 0xffff) == 0x0189 || - (dev->pci_device & 0xffff) == 0x0329) { - if (script == LVDS_RESET) { - nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); - - } else if (script == LVDS_PANEL_ON) { - bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, - bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) - | (1 << 31)); - bios_wr32(bios, NV_PCRTC_GPIO_EXT, - bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); - - } else if (script == LVDS_PANEL_OFF) { - bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, - bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) - & ~(1 << 31)); - bios_wr32(bios, NV_PCRTC_GPIO_EXT, - bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); - } - } + if (script == LVDS_RESET && + (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || + dev->pci_device == 0x0329)) + nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); #endif return 0; @@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b * * For the moment, a quirk will do :) */ - if ((dev->pdev->device == 0x01d7) && - (dev->pdev->subsystem_vendor == 0x1028) && - (dev->pdev->subsystem_device == 0x01c2)) { + if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2)) bios->fp.duallink_transition_clk = 80000; - } /* set dual_link flag for EDID case */ if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) @@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, return 1; } - 
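/*
 * Illustrative sketch, not part of the patch: the ioctl tables above move to
 * a DEF_DRV-style macro that pastes the driver prefix onto the short command
 * name, so each table entry states the command only once. A hypothetical
 * stand-alone version of that table-building pattern:
 */
#include <stdio.h>

#define CMD_INIT  0x00
#define CMD_FLUSH 0x01

typedef int (*handler_t)(void);

struct ioctl_desc { int cmd; handler_t func; const char *name; };

static int do_init(void)  { return 0; }
static int do_flush(void) { return 0; }

/* pastes the CMD_ prefix and keeps the short name as a string for logging */
#define IOCTL_DEF_DRV(ioctl, fn) { CMD_##ioctl, fn, #ioctl }

static const struct ioctl_desc ioctls[] = {
    IOCTL_DEF_DRV(INIT,  do_init),
    IOCTL_DEF_DRV(FLUSH, do_flush),
};

int main(void)
{
    for (size_t i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
        printf("%s -> cmd 0x%02x, handled: %d\n",
               ioctls[i].name, ioctls[i].cmd, ioctls[i].func());
    return 0;
}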
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); + NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -1) { @@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, return 1; } - NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); + NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk == -2) { @@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, return 1; } - NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); + NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk > 0) { @@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, return 1; } - NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); + NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); nouveau_bios_run_init_table(dev, script, dcbent); } else if (pxclk < 0) { @@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, return 1; } - NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); + NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); nouveau_bios_run_init_table(dev, script, dcbent); } @@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, } tmdstableptr = ROM16(bios->data[bitentry->offset]); - - if (tmdstableptr == 0x0) { + if (!tmdstableptr) { NV_ERROR(dev, "Pointer to TMDS table invalid\n"); return -EINVAL; } + NV_INFO(dev, "TMDS table version %d.%d\n", + bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); + /* nv50+ has v2.0, but we don't parse it atm */ - if (bios->data[tmdstableptr] != 0x11) { - NV_WARN(dev, - "TMDS table revision %d.%d not currently supported\n", - bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); + if (bios->data[tmdstableptr] != 0x11) return -ENOSYS; - } /* * These two scripts are odd: they don't seem to get run even when @@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios) gpio->line = tvdac_gpio[1] >> 4; gpio->invert = tvdac_gpio[0] & 2; } + } else { + /* + * No systematic way to store GPIO info on pre-v2.2 + * DCBs, try to match the PCI device IDs. 
+ */ + + /* Apple iMac G4 NV18 */ + if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) { + struct dcb_gpio_entry *gpio = new_gpio_entry(bios); + + gpio->tag = DCB_GPIO_TVDAC0; + gpio->line = 4; + } + } if (!gpio_table_ptr) @@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx) struct drm_device *dev = bios->dev; /* Gigabyte NX85T */ - if ((dev->pdev->device == 0x0421) && - (dev->pdev->subsystem_vendor == 0x1458) && - (dev->pdev->subsystem_device == 0x344c)) { + if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) { if (cte->type == DCB_CONNECTOR_HDMI_1) cte->type = DCB_CONNECTOR_DVI_I; } @@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; break; - case 0xe: + case OUTPUT_EOL: /* weird g80 mobile type that "nv" treats as a terminator */ dcb->entries--; return false; @@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, entry->type = OUTPUT_TV; break; case 2: - case 3: - entry->type = OUTPUT_LVDS; - break; case 4: - switch ((conn & 0x000000f0) >> 4) { - case 0: - entry->type = OUTPUT_TMDS; - break; - case 1: + if (conn & 0x10) entry->type = OUTPUT_LVDS; - break; - default: - NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", - (conn & 0x000000f0) >> 4); - return false; - } + else + entry->type = OUTPUT_TMDS; + break; + case 3: + entry->type = OUTPUT_LVDS; break; default: NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); @@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) * nasty problems until this is sorted (assuming it's not a * VBIOS bug). */ - if ((dev->pdev->device == 0x040d) && - (dev->pdev->subsystem_vendor == 0x1028) && - (dev->pdev->subsystem_device == 0x019b)) { + if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) { if (*conn == 0x02026312 && *conf == 0x00000020) return false; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index fd14dfd3d78..c1de2f3fcb0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -95,6 +95,7 @@ enum dcb_type { OUTPUT_TMDS = 2, OUTPUT_LVDS = 3, OUTPUT_DP = 6, + OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */ OUTPUT_ANY = -1 }; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 84f85183d04..f6f44779d82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -36,6 +36,21 @@ #include <linux/log2.h> #include <linux/slab.h> +int +nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) +{ + struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; + int ret; + + if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) + return 0; + + spin_lock(&nvbo->bo.lock); + ret = ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.lock); + return ret; +} + static void nouveau_bo_del_ttm(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 90fdcda332b..0480f064f2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b1b22baf142..a1473fff06a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - struct nouveau_i2c_chan *i2c; + struct nouveau_i2c_chan *i2c = NULL; struct nouveau_encoder *nv_encoder; struct drm_mode_object *obj; int id; @@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, if (!obj) continue; nv_encoder = nouveau_encoder(obj_to_encoder(obj)); - i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + + if (nv_encoder->dcb->i2c_index < 0xf) + i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { *pnv_encoder = nv_encoder; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e424bf74d70..b1be617373b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); +extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); /* nouveau_fence.c */ struct nouveau_fence; @@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev) return false; } +static inline bool +nv_match_device(struct drm_device *dev, 
unsigned device, + unsigned sub_vendor, unsigned sub_device) +{ + return dev->pdev->device == device && + dev->pdev->subsystem_vendor == sub_vendor && + dev->pdev->subsystem_device == sub_device; +} + #define NV_SW 0x0000506e #define NV_SW_DMA_SEMAPHORE 0x00000060 #define NV_SW_SEMAPHORE_OFFSET 0x00000064 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 6b208ffafa8..87ac21ec23d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan) struct nouveau_fence *fence; uint32_t sequence; + spin_lock(&chan->fence.lock); + if (USE_REFCNT) sequence = nvchan_rd32(chan, 0x48); else sequence = atomic_read(&chan->fence.last_sequence_irq); if (chan->fence.sequence_ack == sequence) - return; + goto out; chan->fence.sequence_ack = sequence; - spin_lock(&chan->fence.lock); list_for_each_safe(entry, tmp, &chan->fence.pending) { fence = list_entry(entry, struct nouveau_fence, entry); @@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan) if (sequence == chan->fence.sequence_ack) break; } +out: spin_unlock(&chan->fence.lock); } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0f417ac1b69..ead7b8fc53f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) list_del(&nvbo->entry); nvbo->reserved_by = NULL; ttm_bo_unreserve(&nvbo->bo); - drm_gem_object_unreference(nvbo->gem); + drm_gem_object_unreference_unlocked(nvbo->gem); } } @@ -300,7 +300,7 @@ retry: validate_fini(op, NULL); if (ret == -EAGAIN) ret = ttm_bo_wait_unreserved(&nvbo->bo, false); - drm_gem_object_unreference(gem); + drm_gem_object_unreference_unlocked(gem); if (ret) { NV_ERROR(dev, "fail reserve\n"); return ret; @@ -337,7 +337,9 @@ retry: return -EINVAL; } + mutex_unlock(&drm_global_mutex); ret = ttm_bo_wait_cpu(&nvbo->bo, false); + mutex_lock(&drm_global_mutex); if (ret) { NV_ERROR(dev, "fail wait_cpu\n"); return ret; @@ -361,16 +363,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; - if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { - spin_lock(&nvbo->bo.lock); - ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.lock); - if (unlikely(ret)) { - NV_ERROR(dev, "fail wait other chan\n"); - return ret; - } + ret = nouveau_bo_sync_gpu(nvbo, chan); + if (unlikely(ret)) { + NV_ERROR(dev, "fail pre-validate sync\n"); + return ret; } ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, @@ -381,7 +378,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - nvbo->channel = chan; + nvbo->channel = (b->read_domains & (1 << 31)) ? 
NULL : chan; ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, false, false, false); nvbo->channel = NULL; @@ -390,6 +387,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } + ret = nouveau_bo_sync_gpu(nvbo, chan); + if (unlikely(ret)) { + NV_ERROR(dev, "fail post-validate sync\n"); + return ret; + } + if (nvbo->bo.offset == b->presumed.offset && ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || @@ -613,7 +616,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, return PTR_ERR(bo); } - mutex_lock(&dev->struct_mutex); + /* Mark push buffers as being used on PFIFO, the validation code + * will then make sure that if the pushbuf bo moves, that they + * happen on the kernel channel, which will in turn cause a sync + * to happen before we try and submit the push buffer. + */ + for (i = 0; i < req->nr_push; i++) { + if (push[i].bo_index >= req->nr_buffers) { + NV_ERROR(dev, "push %d buffer not in list\n", i); + ret = -EINVAL; + goto out; + } + + bo[push[i].bo_index].read_domains |= (1 << 31); + } /* Validate buffer list */ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, @@ -647,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, push[i].length); } } else - if (dev_priv->card_type >= NV_20) { + if (dev_priv->chipset >= 0x25) { ret = RING_SPACE(chan, req->nr_push * 2); if (ret) { NV_ERROR(dev, "cal_space: %d\n", ret); @@ -713,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, out: validate_fini(&op, fence); nouveau_fence_unref((void**)&fence); - mutex_unlock(&dev->struct_mutex); kfree(bo); kfree(push); @@ -722,7 +737,7 @@ out_next: req->suffix0 = 0x00000000; req->suffix1 = 0x00000000; } else - if (dev_priv->card_type >= NV_20) { + if (dev_priv->chipset >= 0x25) { req->suffix0 = 0x00020000; req->suffix1 = 0x00000000; } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 0bd407ca3d4..84614858728 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) if (entry->chan) return -EEXIST; - if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) { + if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { NV_ERROR(dev, "unknown i2c port %d\n", entry->read); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 491767fe4fc..6b9187d7f67 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -214,6 +214,7 @@ int nouveau_sgdma_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev->pdev; struct nouveau_gpuobj *gpuobj = NULL; uint32_t aper_size, obj_size; int i, ret; @@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev) dev_priv->gart_info.sg_dummy_page = alloc_page(GFP_KERNEL|__GFP_DMA32); + if (!dev_priv->gart_info.sg_dummy_page) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); dev_priv->gart_info.sg_dummy_bus = - pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, + pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { + nouveau_gpuobj_del(dev, &gpuobj); + return 
-EFAULT; + } if (dev_priv->card_type < NV_50) { /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index a5dcf768580..0d3206a7046 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct dcb_entry *dcbe = nv_encoder->dcb; int head = nouveau_crtc(encoder->crtc)->index; + struct drm_encoder *slave_encoder; if (dcbe->type == OUTPUT_TMDS) run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); @@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); /* Init external transmitters */ - if (get_tmds_slave(encoder)) - get_slave_funcs(get_tmds_slave(encoder))->mode_set( - encoder, &nv_encoder->mode, &nv_encoder->mode); + slave_encoder = get_tmds_slave(encoder); + if (slave_encoder) + get_slave_funcs(slave_encoder)->mode_set( + slave_encoder, &nv_encoder->mode, &nv_encoder->mode); helper->dpms(encoder, DRM_MODE_DPMS_ON); @@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } +static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) +{ +#ifdef __powerpc__ + struct drm_device *dev = encoder->dev; + + /* BIOS scripts usually take care of the backlight, thanks + * Apple for your consistency. + */ + if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || + dev->pci_device == 0x0329) { + if (mode == DRM_MODE_DPMS_ON) { + nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); + nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1); + } else { + nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); + nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0); + } + } +#endif +} + static inline bool is_powersaving_dpms(int mode) { return (mode != DRM_MODE_DPMS_ON); @@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) LVDS_PANEL_OFF, 0); } + nv04_dfp_update_backlight(encoder, mode); nv04_dfp_update_fp_control(encoder, mode); if (mode == DRM_MODE_DPMS_ON) @@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", mode, nv_encoder->dcb->index); + nv04_dfp_update_backlight(encoder, mode); nv04_dfp_update_fp_control(encoder, mode); } diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 44fefb0c708..13cdc05b7c2 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -121,10 +121,14 @@ static bool get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) { /* Zotac FX5200 */ - if (dev->pdev->device == 0x0322 && - dev->pdev->subsystem_vendor == 0x19da && - (dev->pdev->subsystem_device == 0x1035 || - dev->pdev->subsystem_device == 0x2035)) { + if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) || + nv_match_device(dev, 0x0322, 0x19da, 0x2035)) { + *pin_mask = 0xc; + return false; + } + + /* MSI nForce2 IGP */ + if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) { *pin_mask = 0xc; return false; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 37c7b48ab24..91ef93cf1f3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev) 
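/*
 * Illustrative sketch, not from the patch: the nv50/nvc0 instmem hunks
 * below change ramht_size from a count of hash-table entries to a size in
 * bytes, multiplying by 8 (eight bytes per hash-table entry, as the "* 8"
 * in those hunks implies).  A minimal helper expressing that conversion:
 */
#include <stdint.h>

#define RAMHT_ENTRY_BYTES 8u	/* bytes per hash-table entry */

static uint32_t ramht_bytes(unsigned int ramht_bits)
{
	/* 2^ramht_bits entries, 8 bytes each */
	return (1u << ramht_bits) * RAMHT_ENTRY_BYTES;
}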
chan->file_priv = (struct drm_file *)-2; dev_priv->fifos[0] = dev_priv->fifos[127] = chan; + INIT_LIST_HEAD(&chan->ramht_refs); + /* Channel's PRAMIN object + heap */ ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, NULL, &chan->ramin); @@ -278,7 +280,7 @@ nv50_instmem_init(struct drm_device *dev) /*XXX: incorrect, but needed to make hash func "work" */ dev_priv->ramht_offset = 0x10000; dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 3ab3cdc4217..6b451f86478 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -142,14 +142,16 @@ int nvc0_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 *buf; int i; dev_priv->susres.ramin_copy = vmalloc(65536); if (!dev_priv->susres.ramin_copy) return -ENOMEM; + buf = dev_priv->susres.ramin_copy; - for (i = 0x700000; i < 0x710000; i += 4) - dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i); + for (i = 0; i < 65536; i += 4) + buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i); return 0; } @@ -157,14 +159,15 @@ void nvc0_instmem_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 *buf = dev_priv->susres.ramin_copy; u64 chan; int i; chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; nv_wr32(dev, 0x001700, chan >> 16); - for (i = 0x700000; i < 0x710000; i += 4) - nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]); + for (i = 0; i < 65536; i += 4) + nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]); vfree(dev_priv->susres.ramin_copy); dev_priv->susres.ramin_copy = NULL; @@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev) /*XXX: incorrect, but needed to make hash func "work" */ dev_priv->ramht_offset = 0x10000; dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 077af1f2f9b..a9e33ce6591 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) r128_do_cleanup_pageflip(dev); } } - void r128_driver_lastclose(struct drm_device *dev) { r128_do_cleanup_cce(dev); } struct drm_ioctl_desc r128_ioctls[] = { - DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), - DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH), + DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH), }; int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 12ad512bd3d..464a81a1990 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, args.usV_SyncWidth = cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); + args.ucOverscanRight = radeon_crtc->h_border; + args.ucOverscanLeft = radeon_crtc->h_border; + args.ucOverscanBottom = radeon_crtc->v_border; + args.ucOverscanTop = radeon_crtc->v_border; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= ATOM_VSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_NHSYNC) @@ -471,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, struct radeon_encoder *radeon_encoder = NULL; u32 adjusted_clock = mode->clock; int encoder_mode = 0; + u32 dp_clock = mode->clock; + int bpc = 8; /* reset the pll flags */ pll->flags = 0; @@ -513,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); encoder_mode = atombios_get_encoder_mode(encoder); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = dig_connector->dp_clock; + } + } + if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) @@ -521,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->algo = PLL_ALGO_LEGACY; pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; } + /* There is some evidence (often anecdotal) that RV515 LVDS + * (on some boards at least) prefers the legacy algo. I'm not + * sure whether this should handled generically or on a + * case-by-case quirk basis. Both algos should work fine in the + * majority of cases. 
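/*
 * Illustrative sketch, not from the patch: the quirk that follows this
 * comment keeps the legacy PLL algorithm for LVDS on RV515 boards unless
 * the user forces the new one with radeon_new_pll=1.  The decision in
 * isolation, with a hypothetical enum standing in for PLL_ALGO_*:
 */
enum pll_algo_sketch { PLL_ALGO_SKETCH_LEGACY, PLL_ALGO_SKETCH_NEW };

static enum pll_algo_sketch
pick_pll_algo(enum pll_algo_sketch current, int is_rv515_lvds,
	      int new_pll_param)
{
	if (!is_rv515_lvds)
		return current;	/* no quirk: keep whatever was chosen */
	/* RV515 LVDS prefers legacy unless explicitly overridden */
	return (new_pll_param == 1) ? PLL_ALGO_SKETCH_NEW
				    : PLL_ALGO_SKETCH_LEGACY;
}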
+ */ + if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && + (rdev->family == CHIP_RV515)) { + /* allow the user to overrride just in case */ + if (radeon_new_pll == 1) + pll->algo = PLL_ALGO_NEW; + else + pll->algo = PLL_ALGO_LEGACY; + } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -555,6 +587,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; + if (encoder_mode == ATOM_ENCODER_MODE_DP) { + /* may want to enable SS on DP eventually */ + /* args.v1.ucConfig |= + ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ + } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { + args.v1.ucConfig |= + ADJUST_DISPLAY_CONFIG_SS_ENABLE; + } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -568,10 +608,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (encoder_mode == ATOM_ENCODER_MODE_DP) + if (encoder_mode == ATOM_ENCODER_MODE_DP) { + /* may want to enable SS on DP/eDP eventually */ + /*args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE;*/ args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; - else { + /* 16200 or 27000 */ + args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); + } else { + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + /* deep color support */ + args.v3.sInput.usPixelClock = + cpu_to_le16((mode->clock * bpc / 8) / 10); + } if (dig->coherent_mode) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; @@ -580,13 +630,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, DISPPLL_CONFIG_DUAL_LINK; } } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - /* may want to enable SS on DP/eDP eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ - if (encoder_mode == ATOM_ENCODER_MODE_DP) + if (encoder_mode == ATOM_ENCODER_MODE_DP) { + /* may want to enable SS on DP/eDP eventually */ + /*args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE;*/ args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; - else { + /* 16200 or 27000 */ + args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); + } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { + /* want to enable SS on LVDS eventually */ + /*args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE;*/ + } else { if (mode->clock > 165000) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK; @@ -1019,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) { - WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); - WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); + WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } else { - WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); - WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); + WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } } WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, @@ -1160,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, struct radeon_crtc *radeon_crtc = 
to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder; + bool is_tvcv = false; - /* TODO color tiling */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + /* find tv std */ + if (encoder->crtc == crtc) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + if (radeon_encoder->active_device & + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + is_tvcv = true; + } + } atombios_disable_ss(crtc); /* always set DCPLL */ @@ -1170,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, atombios_crtc_set_pll(crtc, adjusted_mode); atombios_enable_ss(crtc); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_DCE4(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); - else { + else if (ASIC_IS_AVIVO(rdev)) { + if (is_tvcv) + atombios_crtc_set_timing(crtc, adjusted_mode); + else + atombios_set_crtc_dtd_timing(crtc, adjusted_mode); + } else { atombios_crtc_set_timing(crtc, adjusted_mode); if (radeon_crtc->crtc_id == 0) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 36e0d4b545e..4e7778d44b8 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder, enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; else enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; - if (dig_connector->linkb) + if (dig->linkb) enc_id |= ATOM_DP_CONFIG_LINK_B; else enc_id |= ATOM_DP_CONFIG_LINK_A; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 957d5067ad9..b8b7f010b25 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) return 0; } +static int evergreen_cp_start(struct radeon_device *rdev) +{ + int r; + uint32_t cp_me; + + r = radeon_ring_lock(rdev, 7); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); + return r; + } + radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); + radeon_ring_write(rdev, 0x1); + radeon_ring_write(rdev, 0x0); + radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); + radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_unlock_commit(rdev); + + cp_me = 0xff; + WREG32(CP_ME_CNTL, cp_me); + + r = radeon_ring_lock(rdev, 4); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); + return r; + } + /* init some VGT regs */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, 0xe); + radeon_ring_write(rdev, 0x10); + radeon_ring_unlock_commit(rdev); + + return 0; +} + int evergreen_cp_resume(struct radeon_device *rdev) { u32 tmp; @@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) rdev->cp.rptr = RREG32(CP_RB_RPTR); rdev->cp.wptr = RREG32(CP_RB_WPTR); - r600_cp_start(rdev); + evergreen_cp_start(rdev); rdev->cp.ready = true; r = radeon_ring_test(rdev); if (r) { @@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev) */ /* post card */ atom_asic_init(rdev->mode_info.atom_context); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } r = evergreen_startup(rdev); if (r) { @@ -2164,9 +2196,6 @@ int evergreen_init(struct 
radeon_device *rdev) radeon_surface_init(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - r = radeon_clocks_init(rdev); - if (r) - return r; /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev) evergreen_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_clocks_fini(rdev); radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index d0ebae9dde2..afc18d87fdc 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev) } radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); radeon_ring_write(rdev, 0x1); - if (rdev->family >= CHIP_CEDAR) { - radeon_ring_write(rdev, 0x0); - radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); - } else if (rdev->family >= CHIP_RV770) { + if (rdev->family >= CHIP_RV770) { radeon_ring_write(rdev, 0x0); radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); } else { @@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev) */ /* post card */ atom_asic_init(rdev->mode_info.atom_context); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } r = r600_startup(rdev); if (r) { @@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev) radeon_surface_init(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - r = radeon_clocks_init(rdev); - if (r) - return r; /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev) radeon_agp_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_clocks_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); kfree(rdev->bios); @@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL */ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp; WREG32(HDP_DEBUG1, 0); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3dfcfa3ca42..a168d644bf9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +/* VRAM scratch page for HDP bug */ +struct r700_vram_scratch { + struct radeon_bo *robj; + volatile uint32_t *ptr; +}; /* * Core structure, functions and helpers. 
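/*
 * Illustrative sketch, not from the patch: the r700_vram_scratch object
 * declared above gives the RV770-RV740 HDP flush workaround a dedicated
 * VRAM page; r600_ioctl_wait_idle() in this patch switches its pointer
 * from the GART table mapping to vram_scratch.ptr.  The idiom appears to
 * be a throwaway read through that mapping, roughly:
 */
#include <stdint.h>

struct vram_scratch_sketch {
	volatile uint32_t *ptr;	/* CPU mapping of the scratch VRAM page */
};

static void hdp_flush_readback(const struct vram_scratch_sketch *s)
{
	if (s->ptr) {
		uint32_t tmp = s->ptr[0];	/* value is discarded */
		(void)tmp;
	}
}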
@@ -1079,6 +1084,7 @@ struct radeon_device { const struct firmware *pfp_fw; /* r6/700 PFP firmware */ const struct firmware *rlc_fw; /* r6/700 RLC firmware */ struct r600_blit r600_blit; + struct r700_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ struct workqueue_struct *wq; @@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev); extern void radeon_update_bandwidth_info(struct radeon_device *rdev); extern void radeon_update_display_priority(struct radeon_device *rdev); extern bool radeon_boot_test_post_card(struct radeon_device *rdev); -extern int radeon_clocks_init(struct radeon_device *rdev); -extern void radeon_clocks_fini(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index f40dfb77f9b..bd2f33e5c91 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev) } mode.mode = info.mode; - agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; + /* chips with the agp to pcie bridge don't have the AGP_STATUS register + * Just use the whatever mode the host sets up. + */ + if (rdev->family <= CHIP_RV350) + agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; + else + agp_status = mode.mode; is_v3 = !!(agp_status & RADEON_AGPv3_MODE); if (is_v3) { diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 646f96f97c7..25e1dd19779 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, .set_surface_reg = r600_set_surface_reg, @@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev) return 0; } -/* - * Wrapper around modesetting bits. Move to radeon_clocks.c? 
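/*
 * Illustrative sketch, not from the patch: the evergreen asic table above
 * now lists .get_pcie_lanes = NULL explicitly, and later in this patch
 * radeon_get_clock_info() only calls ->get_memory_clock() when the hook is
 * non-NULL.  The optional-callback pattern, with hypothetical types:
 */
struct asic_ops_sketch {
	unsigned int (*get_memory_clock)(void *priv);	/* may be NULL */
};

static unsigned int mclk_or_default(const struct asic_ops_sketch *ops,
				    void *priv, unsigned int fallback)
{
	/* guard the optional hook before dereferencing it */
	if (ops->get_memory_clock)
		return ops->get_memory_clock(priv);
	return fallback;
}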
- */ -int radeon_clocks_init(struct radeon_device *rdev) -{ - int r; - - r = radeon_static_clocks_init(rdev->ddev); - if (r) { - return r; - } - DRM_INFO("Clocks initialized !\n"); - return 0; -} - -void radeon_clocks_fini(struct radeon_device *rdev) -{ -} diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6d30868744e..ebae14c4b76 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -32,11 +32,11 @@ /* from radeon_encoder.c */ extern uint32_t -radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, - uint8_t dac); +radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, + uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); extern void -radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, +radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); /* from radeon_connector.c */ @@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, - bool linkb, uint32_t igp_lane_info, + uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router); /* from radeon_legacy_encoder.c */ extern void -radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, +radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); union atom_supported_devices { @@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; + /* some evergreen boards have bad data for this entry */ + if (ASIC_IS_DCE4(rdev)) { + if ((i == 7) && + (gpio->usClkMaskRegisterIndex == 0x1936) && + (gpio->sucI2cId.ucAccess == 0)) { + gpio->sucI2cId.ucAccess = 0x97; + gpio->ucDataMaskShift = 8; + gpio->ucDataEnShift = 8; + gpio->ucDataY_Shift = 8; + gpio->ucDataA_Shift = 8; + } + } + if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; @@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; i2c.valid = false; + + /* some evergreen boards have bad data for this entry */ + if (ASIC_IS_DCE4(rdev)) { + if ((i == 7) && + (gpio->usClkMaskRegisterIndex == 0x1936) && + (gpio->sucI2cId.ucAccess == 0)) { + gpio->sucI2cId.ucAccess = 0x97; + gpio->ucDataMaskShift = 8; + gpio->ucDataEnShift = 8; + gpio->ucDataY_Shift = 8; + gpio->ucDataA_Shift = 8; + } + } + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; @@ -226,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device struct radeon_hpd hpd; u32 reg; + memset(&hpd, 0, sizeof(struct radeon_hpd)); + if (ASIC_IS_DCE4(rdev)) reg = EVERGREEN_DC_GPIO_HPD_A; else @@ -477,7 +506,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) int i, j, k, path_size, device_support; int connector_type; u16 igp_lane_info, conn_id, connector_object_id; - bool linkb; struct radeon_i2c_bus_rec ddc_bus; struct radeon_router router; struct radeon_gpio_rec gpio; @@ -510,7 +538,7 @@ bool 
radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) addr += path_size; path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); - linkb = false; + if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; @@ -601,13 +629,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { - if (grph_obj_num == 2) - linkb = true; - else - linkb = false; + u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); radeon_add_atom_encoder(dev, - grph_obj_id, + encoder_obj, le16_to_cpu (path-> usDeviceTag)); @@ -744,7 +769,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) le16_to_cpu(path-> usDeviceTag), connector_type, &ddc_bus, - linkb, igp_lane_info, + igp_lane_info, connector_object_id, &hpd, &router); @@ -933,13 +958,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) radeon_add_atom_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i)); else radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, (1 << i), dac), (1 << i)); @@ -996,7 +1021,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct bios_connectors[i]. connector_type, &bios_connectors[i].ddc_bus, - false, 0, + 0, connector_object_id, &bios_connectors[i].hpd, &router); @@ -1183,7 +1208,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) return true; break; case 2: - if (igp_info->info_2.ucMemoryType & 0x0f) + if (igp_info->info_2.ulBootUpSidePortClock) return true; break; default: @@ -1305,6 +1330,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct union lvds_info *lvds_info; uint8_t frev, crev; struct radeon_encoder_atom_dig *lvds = NULL; + int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { @@ -1368,6 +1394,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct } encoder->native_mode = lvds->native_mode; + + if (encoder_enum == 2) + lvds->linkb = true; + else + lvds->linkb = false; + } return lvds; } diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 14448a740ba..5249af8931e 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev) mpll->max_feedback_div = 0xff; mpll->best_vco = 0; + if (!rdev->clock.default_sclk) + rdev->clock.default_sclk = radeon_get_engine_clock(rdev); + if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock) + rdev->clock.default_mclk = radeon_get_memory_clock(rdev); + + rdev->pm.current_sclk = rdev->clock.default_sclk; + rdev->pm.current_mclk = rdev->clock.default_mclk; + } /* 10 khz */ @@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) } } -static void radeon_apply_clock_quirks(struct radeon_device *rdev) -{ - uint32_t tmp; - - /* XXX make sure engine is idle */ - - if (rdev->family < CHIP_RS600) { - tmp = RREG32_PLL(RADEON_SCLK_CNTL); - if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev)) - tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP; - if ((rdev->family == CHIP_RV250) - || (rdev->family == CHIP_RV280)) - tmp |= - 
RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2; - if ((rdev->family == CHIP_RV350) - || (rdev->family == CHIP_RV380)) - tmp |= R300_SCLK_FORCE_VAP; - if (rdev->family == CHIP_R420) - tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX; - WREG32_PLL(RADEON_SCLK_CNTL, tmp); - } else if (rdev->family < CHIP_R600) { - tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL); - tmp |= AVIVO_CP_FORCEON; - WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp); - - tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL); - tmp |= AVIVO_E2_FORCEON; - WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp); - - tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL); - tmp |= AVIVO_IDCT_FORCEON; - WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp); - } -} - -int radeon_static_clocks_init(struct drm_device *dev) -{ - struct radeon_device *rdev = dev->dev_private; - - /* XXX make sure engine is idle */ - - if (radeon_dynclks != -1) { - if (radeon_dynclks) { - if (rdev->asic->set_clock_gating) - radeon_set_clock_gating(rdev, 1); - } - } - radeon_apply_clock_quirks(rdev); - return 0; -} diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 885dcfac183..bd74e428bd1 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -39,8 +39,8 @@ /* from radeon_encoder.c */ extern uint32_t -radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, - uint8_t dac); +radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, + uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); /* from radeon_connector.c */ @@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev, /* from radeon_legacy_encoder.c */ extern void -radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, +radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); /* old legacy ATI BIOS routines */ @@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_1; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - 
radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? */ radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_2; /* ??? */ radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP2_SUPPORT, 0), ATOM_DEVICE_DFP2_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? */ radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); hpd.hpd = RADEON_HPD_1; /* ??? 
*/ radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); @@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c.valid = false; hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_TV1_SUPPORT, 2), ATOM_DEVICE_TV1_SUPPORT); @@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); hpd.hpd = RADEON_HPD_NONE; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT2_SUPPORT, 2), ATOM_DEVICE_CRT2_SUPPORT); @@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) else devices = ATOM_DEVICE_DFP1_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, devices, 0), devices); radeon_add_legacy_connector(dev, i, devices, @@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if (tmp & 0x1) { devices = ATOM_DEVICE_CRT2_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT2_SUPPORT, 2), @@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) } else { devices = ATOM_DEVICE_CRT1_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT1_SUPPORT, 1), @@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if (tmp & 0x1) { devices |= ATOM_DEVICE_CRT2_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT2_SUPPORT, 2), @@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) } else { devices |= 
ATOM_DEVICE_CRT1_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_CRT1_SUPPORT, 1), @@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_DFP2_SUPPORT, 0), @@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) } else { devices |= ATOM_DEVICE_DFP1_SUPPORT; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_DFP1_SUPPORT, 0), @@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; } radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, devices, 0), devices); radeon_add_legacy_connector(dev, i, devices, @@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) case CONNECTOR_CTV_LEGACY: case CONNECTOR_STV_LEGACY: radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_TV1_SUPPORT, 2), @@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_DFP1_SUPPORT, 0), ATOM_DEVICE_DFP1_SUPPORT); @@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); if (crt_info) { radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_CRT1_SUPPORT, 1), ATOM_DEVICE_CRT1_SUPPORT); @@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) COMBIOS_LCD_DDC_INFO_TABLE); radeon_add_legacy_encoder(dev, - radeon_get_encoder_id(dev, + radeon_get_encoder_enum(dev, ATOM_DEVICE_LCD1_SUPPORT, 0), ATOM_DEVICE_LCD1_SUPPORT); @@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) hpd.hpd = RADEON_HPD_NONE; ddc_i2c.valid = false; radeon_add_legacy_encoder(dev, - radeon_get_encoder_id + radeon_get_encoder_enum (dev, ATOM_DEVICE_TV1_SUPPORT, 2), diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 47c4b276d30..a9dd7847d96 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -977,27 +977,29 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto struct radeon_connector *radeon_connector = to_radeon_connector(connector); enum drm_connector_status ret = connector_status_disconnected; struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - u8 sink_type; if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - sink_type = radeon_dp_getsinktype(radeon_connector); - if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || - (sink_type == CONNECTOR_OBJECT_ID_eDP)) { - if (radeon_dp_getdpcd(radeon_connector)) { - radeon_dig_connector->dp_sink_type = sink_type; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + /* eDP is always 
DP */ + radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; + if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; - } } else { - if (radeon_ddc_probe(radeon_connector)) { - radeon_dig_connector->dp_sink_type = sink_type; - ret = connector_status_connected; + radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); + if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { + if (radeon_dp_getdpcd(radeon_connector)) + ret = connector_status_connected; + } else { + if (radeon_ddc_probe(radeon_connector)) + ret = connector_status_connected; } } + radeon_connector_update_scratch_regs(connector, ret); return ret; } @@ -1037,7 +1039,6 @@ radeon_add_atom_connector(struct drm_device *dev, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, - bool linkb, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, @@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev, uint32_t subpixel_order = SubPixelNone; bool shared_ddc = false; - /* fixme - tv/cv/din */ if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; + /* if the user selected tv=0 don't try and add the connector */ + if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || + (connector_type == DRM_MODE_CONNECTOR_Composite) || + (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && + (radeon_tv == 0)) + return; + /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); @@ -1128,7 +1135,6 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; - radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); @@ -1158,7 +1164,6 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; - radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); @@ -1182,7 +1187,6 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; - radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); @@ -1211,25 +1215,22 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: - if (radeon_tv == 1) { - drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); - drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - radeon_connector->dac_load_detect = true; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - 1); - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.tv_std_property, - radeon_atombios_get_tv_info(rdev)); - 
/* no HPD on analog connectors */ - radeon_connector->hpd.hpd = RADEON_HPD_NONE; - } + drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); + drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); + radeon_connector->dac_load_detect = true; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.tv_std_property, + radeon_atombios_get_tv_info(rdev)); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; - radeon_dig_connector->linkb = linkb; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); @@ -1275,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev, struct radeon_connector *radeon_connector; uint32_t subpixel_order = SubPixelNone; - /* fixme - tv/cv/din */ if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; + /* if the user selected tv=0 don't try and add the connector */ + if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || + (connector_type == DRM_MODE_CONNECTOR_Composite) || + (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && + (radeon_tv == 0)) + return; + /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); @@ -1350,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: - if (radeon_tv == 1) { - drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); - drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); - radeon_connector->dac_load_detect = true; - /* RS400,RC410,RS480 chipset seems to report a lot - * of false positive on load detect, we haven't yet - * found a way to make load detect reliable on those - * chipset, thus just disable it for TV. - */ - if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) - radeon_connector->dac_load_detect = false; - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.load_detect_property, - radeon_connector->dac_load_detect); - drm_connector_attach_property(&radeon_connector->base, - rdev->mode_info.tv_std_property, - radeon_combios_get_tv_info(rdev)); - /* no HPD on analog connectors */ - radeon_connector->hpd.hpd = RADEON_HPD_NONE; - } + drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); + drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); + radeon_connector->dac_load_detect = true; + /* RS400,RC410,RS480 chipset seems to report a lot + * of false positive on load detect, we haven't yet + * found a way to make load detect reliable on those + * chipset, thus just disable it for TV. 
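The connector hunks above replace the per-case `if (radeon_tv == 1)` wrapper around the TV-out setup with a single early return: when the radeon_tv module parameter is 0, S-Video, Composite and 9-pin DIN connector types are rejected before any registration work happens, so the switch cases no longer need the conditional. A minimal standalone sketch of that guard pattern, with illustrative names (the helper and enum below are not the driver's actual identifiers):

#include <stdbool.h>
#include <stdio.h>

enum connector_type { CT_VGA, CT_LVDS, CT_SVIDEO, CT_COMPOSITE, CT_9PIN_DIN };

static int radeon_tv = 1;  /* stand-in for the module parameter */

/* Illustrative helper: true for the analog TV-out connector types. */
static bool connector_type_is_tv(enum connector_type t)
{
        return t == CT_SVIDEO || t == CT_COMPOSITE || t == CT_9PIN_DIN;
}

static void add_connector(enum connector_type t)
{
        /* Reject TV-out types up front instead of branching in every case below. */
        if (connector_type_is_tv(t) && radeon_tv == 0)
                return;
        printf("registering connector type %d\n", t);
}

int main(void)
{
        radeon_tv = 0;
        add_connector(CT_SVIDEO);  /* skipped: TV output disabled by the parameter */
        add_connector(CT_VGA);     /* still registered */
        return 0;
}

The same guard is added to both the ATOM and legacy connector paths, which keeps the two code paths consistent.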
+ */ + if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) + radeon_connector->dac_load_detect = false; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + radeon_connector->dac_load_detect); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.tv_std_property, + radeon_combios_get_tv_info(rdev)); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4f7a170d156..256d204a6d2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { + if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); mc->real_vram_size = mc->aper_size; mc->mc_vram_size = mc->aper_size; @@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev) void radeon_update_bandwidth_info(struct radeon_device *rdev) { fixed20_12 a; - u32 sclk, mclk; + u32 sclk = rdev->pm.current_sclk; + u32 mclk = rdev->pm.current_mclk; - if (rdev->flags & RADEON_IS_IGP) { - sclk = radeon_get_engine_clock(rdev); - mclk = rdev->clock.default_mclk; - - a.full = dfixed_const(100); - rdev->pm.sclk.full = dfixed_const(sclk); - rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); - rdev->pm.mclk.full = dfixed_const(mclk); - rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + /* sclk/mclk in Mhz */ + a.full = dfixed_const(100); + rdev->pm.sclk.full = dfixed_const(sclk); + rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); + rdev->pm.mclk.full = dfixed_const(mclk); + rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); + if (rdev->flags & RADEON_IS_IGP) { a.full = dfixed_const(16); /* core_bandwidth = sclk(Mhz) * 16 */ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); - } else { - sclk = radeon_get_engine_clock(rdev); - mclk = radeon_get_memory_clock(rdev); - - a.full = dfixed_const(100); - rdev->pm.sclk.full = dfixed_const(sclk); - rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); - rdev->pm.mclk.full = dfixed_const(mclk); - rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); } } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5764f4d3b4f..6dd434ad242 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev) radeon_i2c_fini(rdev); } +static bool is_hdtv_mode(struct drm_display_mode *mode) +{ + /* try and guess if this is a tv or a monitor */ + if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ + (mode->vdisplay == 576) || /* 576p */ + (mode->vdisplay == 720) || /* 720p */ + (mode->vdisplay == 1080)) /* 1080p */ + return true; + else + return false; +} + bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if (ASIC_IS_AVIVO(rdev) && 
((radeon_encoder->underscan_type == UNDERSCAN_ON) || ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && - drm_detect_hdmi_monitor(radeon_connector->edid)))) { + drm_detect_hdmi_monitor(radeon_connector->edid) && + is_hdtv_mode(mode)))) { radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; radeon_crtc->rmx_type = RMX_FULL; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 263c8098d7d..2c293e8304d 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev) } uint32_t -radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) +radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac) { struct radeon_device *rdev = dev->dev_private; uint32_t ret = 0; @@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t if ((rdev->family == CHIP_RS300) || (rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) - ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; + ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; else if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; + ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1; else - ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; + ret = ENCODER_INTERNAL_DAC1_ENUM_ID1; break; case 2: /* dac b */ if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; + ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; else { /*if (rdev->family == CHIP_R200) - ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; + ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; else*/ - ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; + ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; } break; case 3: /* external dac */ if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; + ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; else - ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; + ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; break; } break; case ATOM_DEVICE_LCD1_SUPPORT: if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; + ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; else - ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; + ret = ENCODER_INTERNAL_LVDS_ENUM_ID1; break; case ATOM_DEVICE_DFP1_SUPPORT: if ((rdev->family == CHIP_RS300) || (rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480)) - ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; + ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; else if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; + ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1; else - ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; + ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1; break; case ATOM_DEVICE_LCD2_SUPPORT: case ATOM_DEVICE_DFP2_SUPPORT: if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - ret = ENCODER_OBJECT_ID_INTERNAL_DDI; + ret = ENCODER_INTERNAL_DDI_ENUM_ID1; else if (ASIC_IS_AVIVO(rdev)) - ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; + ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; else - ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; + ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; break; case ATOM_DEVICE_DFP3_SUPPORT: - ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; + ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; break; } @@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) return NULL; } -static struct radeon_connector_atom_dig * -radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct 
radeon_device *rdev = dev->dev_private; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; - - if (!rdev->is_atom_bios) - return NULL; - - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - return NULL; - - radeon_connector = to_radeon_connector(connector); - - if (!radeon_connector->con_priv) - return NULL; - - dig_connector = radeon_connector->con_priv; - - return dig_connector; -} - void radeon_panel_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { @@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct radeon_connector_atom_dig *dig_connector = - radeon_get_atom_connector_priv_from_encoder(encoder); union lvds_encoder_control args; int index = 0; int hdmi_detected = 0; uint8_t frev, crev; - if (!dig || !dig_connector) + if (!dig) return; if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) @@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) args.v1.ucMisc |= (1 << 1); } else { - if (dig_connector->linkb) + if (dig->linkb) args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; if (radeon_encoder->pixel_clock > 165000) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; @@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; } } else { - if (dig_connector->linkb) + if (dig->linkb) args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; if (radeon_encoder->pixel_clock > 165000) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; @@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) int atombios_get_encoder_mode(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; @@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ - if (drm_detect_hdmi_monitor(radeon_connector->edid)) - return ATOM_ENCODER_MODE_HDMI; - else if (radeon_connector->use_digital) + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else if (radeon_connector->use_digital) return ATOM_ENCODER_MODE_DVI; else return ATOM_ENCODER_MODE_CRT; @@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: default: - if (drm_detect_hdmi_monitor(radeon_connector->edid)) - return ATOM_ENCODER_MODE_HDMI; - else + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else return ATOM_ENCODER_MODE_DVI; break; case DRM_MODE_CONNECTOR_LVDS: @@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return 
ATOM_ENCODER_MODE_DP; - else if (drm_detect_hdmi_monitor(radeon_connector->edid)) - return ATOM_ENCODER_MODE_HDMI; - else + else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* fix me */ + if (ASIC_IS_DCE4(rdev)) + return ATOM_ENCODER_MODE_DVI; + else + return ATOM_ENCODER_MODE_HDMI; + } else return ATOM_ENCODER_MODE_DVI; break; case DRM_MODE_CONNECTOR_DVIA: @@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct radeon_connector_atom_dig *dig_connector = - radeon_get_atom_connector_priv_from_encoder(encoder); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); union dig_encoder_control args; int index = 0; uint8_t frev, crev; + int dp_clock = 0; + int dp_lane_count = 0; + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; - if (!dig || !dig_connector) + dp_clock = dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + } + + /* no dig encoder assigned */ + if (dig->dig_encoder == -1) return; memset(&args, 0, sizeof(args)); @@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dig_connector->dp_clock == 270000) + if (dp_clock == 270000) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; - args.v1.ucLaneNum = dig_connector->dp_lane_count; + args.v1.ucLaneNum = dp_lane_count; } else if (radeon_encoder->pixel_clock > 165000) args.v1.ucLaneNum = 8; else @@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; break; } - if (dig_connector->linkb) + if (dig->linkb) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; else args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; @@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct radeon_connector_atom_dig *dig_connector = - radeon_get_atom_connector_priv_from_encoder(encoder); - struct drm_connector *connector; - struct radeon_connector *radeon_connector; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); union dig_transmitter_control args; int index = 0; uint8_t frev, crev; bool is_dp = false; int pll_id = 0; + int dp_clock = 0; + int dp_lane_count = 0; + int connector_object_id = 0; + int igp_lane_info = 0; - if (!dig || !dig_connector) - return; + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; - connector = radeon_get_connector_for_encoder(encoder); - radeon_connector = to_radeon_connector(connector); + dp_clock = dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + connector_object_id = + (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + igp_lane_info = dig_connector->igp_lane_info; + } + + /* no dig encoder assigned */ + if (dig->dig_encoder == -1) + return; if 
(atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) is_dp = true; memset(&args, 0, sizeof(args)); - if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); - else { - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl); - break; - } + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); + break; } if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) @@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { - args.v1.usInitInfo = radeon_connector->connector_object_id; + args.v1.usInitInfo = connector_object_id; } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v1.asMode.ucLaneSel = lane_num; args.v1.asMode.ucLaneSet = lane_set; } else { if (is_dp) args.v1.usPixelClock = - cpu_to_le16(dig_connector->dp_clock / 10); + cpu_to_le16(dp_clock / 10); else if (radeon_encoder->pixel_clock > 165000) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } if (ASIC_IS_DCE4(rdev)) { if (is_dp) - args.v3.ucLaneNum = dig_connector->dp_lane_count; + args.v3.ucLaneNum = dp_lane_count; else if (radeon_encoder->pixel_clock > 165000) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; - if (dig_connector->linkb) { + if (dig->linkb) { args.v3.acConfig.ucLinkSel = 1; args.v3.acConfig.ucEncoderSel = 1; } @@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } } else if (ASIC_IS_DCE32(rdev)) { args.v2.acConfig.ucEncoderSel = dig->dig_encoder; - if (dig_connector->linkb) + if (dig->linkb) args.v2.acConfig.ucLinkSel = 1; switch (radeon_encoder->encoder_id) { @@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if ((rdev->flags & RADEON_IS_IGP) && (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { - if (dig_connector->igp_lane_info & 0x1) + if (igp_lane_info & 0x1) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; - else if (dig_connector->igp_lane_info & 0x2) + else if (igp_lane_info & 0x2) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; - else if (dig_connector->igp_lane_info & 0x4) + else if (igp_lane_info & 0x4) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; - else if (dig_connector->igp_lane_info & 0x8) + else if (igp_lane_info & 0x8) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; } else { - if (dig_connector->igp_lane_info & 0x3) + if (igp_lane_info & 0x3) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; - else if (dig_connector->igp_lane_info & 0xc) + else if (igp_lane_info & 0xc) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; } } - if (dig_connector->linkb) + if (dig->linkb) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; else args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; @@ -1072,8 +1078,7 @@ 
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (is_dig) { switch (mode) { case DRM_MODE_DPMS_ON: - if (!ASIC_IS_DCE4(rdev)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); @@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (!ASIC_IS_DCE4(rdev)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); @@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) uint32_t dig_enc_in_use = 0; if (ASIC_IS_DCE4(rdev)) { - struct radeon_connector_atom_dig *dig_connector = - radeon_get_atom_connector_priv_from_encoder(encoder); - + dig = radeon_encoder->enc_priv; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig_connector->linkb) + if (dig->linkb) return 1; else return 0; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig_connector->linkb) + if (dig->linkb) return 3; else return 2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig_connector->linkb) + if (dig->linkb) return 5; else return 4; @@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) struct radeon_encoder_atom_dig * radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) { + int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); if (!dig) @@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) dig->coherent_mode = true; dig->dig_encoder = -1; + if (encoder_enum == 2) + dig->linkb = true; + else + dig->linkb = false; + return dig; } void -radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) +radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; @@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->encoder_id == encoder_id) { + if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } @@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su radeon_encoder->enc_priv = NULL; - radeon_encoder->encoder_id = encoder_id; + radeon_encoder->encoder_enum = encoder_enum; + radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; radeon_encoder->underscan_type = UNDERSCAN_OFF; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index dbf86962bdd..c74a8b20d94 100644 --- 
a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_kernel, + false, true, &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d)\n", diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index bfd2ce5f537..6a13ee38a5b 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) } } + /* switch the pads to ddc mode */ + if (ASIC_IS_DCE3(rdev) && rec->hw_capable) { + temp = RREG32(rec->mask_clk_reg); + temp &= ~(1 << 16); + WREG32(rec->mask_clk_reg, temp); + } + /* clear the output pin values */ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; WREG32(rec->a_clk_reg, temp); @@ -206,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap) static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) { - u32 sclk = radeon_get_engine_clock(rdev); + u32 sclk = rdev->pm.current_sclk; u32 prescale = 0; u32 nm; u8 n, m, loop; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 059bfa4098d..a108c7ed14f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev) * chips. Disable MSI on them for now. */ if ((rdev->family >= CHIP_RV380) && - (!(rdev->flags & RADEON_IS_IGP))) { + (!(rdev->flags & RADEON_IS_IGP)) && + (!(rdev->flags & RADEON_IS_AGP))) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { rdev->msi_enabled = 1; - DRM_INFO("radeon: using MSI.\n"); + dev_info(rdev->dev, "radeon: using MSI.\n"); } } rdev->irq.installed = true; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b1c8ace5f08..5eee3c41d12 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); return -EINVAL; } + break; case RADEON_INFO_WANT_HYPERZ: /* The "value" here is both an input and output parameter. * If the input value is 1, filp requests hyper-z access. 
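Among the smaller fixes above is the `break;` added to radeon_info_ioctl(): without it, the tiling-config case fell straight through into the RADEON_INFO_WANT_HYPERZ case. A standalone sketch of the fallthrough it prevents (the request names and bodies here are illustrative, not the ioctl's real handlers):

#include <stdio.h>

enum info_request { INFO_TILING_CONFIG, INFO_WANT_HYPERZ };

static void handle_info(enum info_request req)
{
        switch (req) {
        case INFO_TILING_CONFIG:
                printf("report tiling config\n");
                break;  /* the added break: without it, the hyper-z code below also ran */
        case INFO_WANT_HYPERZ:
                printf("negotiate hyper-z access\n");
                break;
        }
}

int main(void)
{
        handle_info(INFO_TILING_CONFIG);  /* prints only the tiling line once fixed */
        return 0;
}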
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms) struct drm_ioctl_desc radeon_ioctls_kms[] = { - DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FLIP, 
radeon_cp_flip_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), /* KMS */ - DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 989df519a1e..305049afde1 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, if (!ref_div) return 1; - vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; + vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; /* * This is horribly crude: the VCO frequency range is divided into diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b8149cbc0c7..0b8397000f4 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ 
b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra } void -radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) +radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; @@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t /* see if we already added it */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->encoder_id == encoder_id) { + if (radeon_encoder->encoder_enum == encoder_enum) { radeon_encoder->devices |= supported_device; return; } @@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t radeon_encoder->enc_priv = NULL; - radeon_encoder->encoder_id = encoder_id; + radeon_encoder->encoder_enum = encoder_enum; + radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 5bbc086b926..efbe975312d 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -342,6 +342,7 @@ struct radeon_atom_ss { }; struct radeon_encoder_atom_dig { + bool linkb; /* atom dig */ bool coherent_mode; int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ @@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac { struct radeon_encoder { struct drm_encoder base; + uint32_t encoder_enum; uint32_t encoder_id; uint32_t devices; uint32_t active_device; @@ -378,7 +380,6 @@ struct radeon_encoder { struct radeon_connector_atom_dig { uint32_t igp_lane_info; - bool linkb; /* displayport */ struct radeon_i2c_chan *dp_i2c_bus; u8 dpcd[8]; @@ -599,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_combios_asic_init(struct drm_device *dev); -extern int radeon_static_clocks_init(struct drm_device *dev); bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 58038f5cab3..f87efec7623 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) { int i; + /* no need to take locks, etc. 
if nothing's going to change */ + if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && + (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) + return; + mutex_lock(&rdev->ddev->struct_mutex); mutex_lock(&rdev->vram_mutex); mutex_lock(&rdev->cp.mutex); @@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev) } radeon_hwmon_fini(rdev); - if (rdev->pm.i2c_bus) - radeon_i2c_destroy(rdev->pm.i2c_bus); } void radeon_pm_compute_clocks(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index b3ba44c0a81..4ae5a3d1074 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) } struct drm_ioctl_desc radeon_ioctls[] = { - DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) + DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH), + 
DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) }; int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index f1c79681011..bfa59db374d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev) } +static int rv770_vram_scratch_init(struct radeon_device *rdev) +{ + int r; + u64 gpu_addr; + + if (rdev->vram_scratch.robj == NULL) { + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, + true, RADEON_GEM_DOMAIN_VRAM, + &rdev->vram_scratch.robj); + if (r) { + return r; + } + } + + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->vram_scratch.robj, + RADEON_GEM_DOMAIN_VRAM, &gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->vram_scratch.robj); + return r; + } + r = radeon_bo_kmap(rdev->vram_scratch.robj, + (void **)&rdev->vram_scratch.ptr); + if (r) + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + + return r; +} + +static void rv770_vram_scratch_fini(struct radeon_device *rdev) +{ + int r; + + if (rdev->vram_scratch.robj == NULL) { + return; + } + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->vram_scratch.robj); + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + } + radeon_bo_unref(&rdev->vram_scratch.robj); +} + int rv770_mc_init(struct radeon_device *rdev) { u32 tmp; @@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; } + r = rv770_vram_scratch_init(rdev); + if (r) + return r; rv770_gpu_init(rdev); r = r600_blit_init(rdev); if (r) { @@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev) */ /* post card */ atom_asic_init(rdev->mode_info.atom_context); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } r = rv770_startup(rdev); if (r) { @@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev) radeon_surface_init(rdev); /* Initialize clocks */ radeon_get_clock_info(rdev->ddev); - r = radeon_clocks_init(rdev); - if (r) - return r; /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) @@ 
-1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev) r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); + rv770_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); - radeon_clocks_fini(rdev); radeon_agp_fini(rdev); radeon_bo_fini(rdev); radeon_atombios_fini(rdev); diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 976dc8d2528..bf5f83ea14f 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) } struct drm_ioctl_desc savage_ioctls[] = { - DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), }; int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 07d0f2979ca..7fe2b63412c 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, } struct drm_ioctl_desc sis_ioctls[] = { - DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 68dda74a50a..cc0ffa9abd0 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c @@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * } struct drm_ioctl_desc via_ioctls[] = { - DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), - DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), - DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), - DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) + DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) }; int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9dd395b9021..72ec2e2b6e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -99,47 +99,47 @@ */ #define VMW_IOCTL_DEF(ioctl, func, flags) \ - [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} /** * Ioctl definitions. */ static struct drm_ioctl_desc vmw_ioctls[] = { - VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, + VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, + VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, + VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, + VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, + VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, + VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, + VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, + VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, + VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, + VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, + VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, + VMW_IOCTL_DEF(VMW_REF_SURFACE, 
vmw_surface_reference_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, + VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, + VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, + VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl, DRM_AUTH | DRM_UNLOCKED), - VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, + VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e635199a0cd..0c52899be96 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c index f44bdc084cb..8ca7f65cf2f 100644 --- a/drivers/hid/hid-egalax.c +++ b/drivers/hid/hid-egalax.c @@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field, { struct egalax_data *td = hid_get_drvdata(hid); + /* Note, eGalax has two product lines: the first is resistive and + * uses a standard parallel multitouch protocol (product ID == + * 48xx). The second is capacitive and uses an unusual "serial" + * protocol with a different message for each multitouch finger + * (product ID == 72xx). We do not yet generate a correct event + * sequence for the capacitive/serial protocol. 
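The eGalax changes in this area add a second USB product ID, 0x720c, in three places: a new define in hid-ids.h, an entry in hid_blacklist[] in hid-core.c (above), and an entry in the driver's own egalax_devices[] table (below), so the capacitive 72xx line is bound to the same driver as the resistive 48xx one even though, per the comment above, its serial per-finger protocol is not fully handled yet. A standalone sketch of the vendor/product matching idea (the struct and function names here are illustrative, not the HID core's):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct dev_id { uint16_t vendor; uint16_t product; };

#define VENDOR_ID_DWAV        0x0eef
#define DEVICE_ID_EGALAX_MT   0x480d   /* resistive, parallel multitouch protocol */
#define DEVICE_ID_EGALAX_MT1  0x720c   /* capacitive, "serial" per-finger protocol */

/* Both product IDs must appear here, or the device falls back to generic handling. */
static const struct dev_id egalax_ids[] = {
        { VENDOR_ID_DWAV, DEVICE_ID_EGALAX_MT  },
        { VENDOR_ID_DWAV, DEVICE_ID_EGALAX_MT1 },
};

static int id_matches(const struct dev_id *tbl, size_t n, uint16_t v, uint16_t p)
{
        for (size_t i = 0; i < n; i++)
                if (tbl[i].vendor == v && tbl[i].product == p)
                        return 1;
        return 0;
}

int main(void)
{
        printf("0x720c handled: %d\n",
               id_matches(egalax_ids, sizeof(egalax_ids) / sizeof(egalax_ids[0]),
                          VENDOR_ID_DWAV, DEVICE_ID_EGALAX_MT1));
        return 0;
}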
+ */ if (hid->claimed & HID_CLAIMED_INPUT) { struct input_dev *input = field->hidinput->input; @@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev) static const struct hid_device_id egalax_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, { } }; MODULE_DEVICE_TABLE(hid, egalax_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d3fc13ae094..85c6d13c9ff 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -188,6 +188,7 @@ #define USB_VENDOR_ID_DWAV 0x0eef #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c index 346f0e34987..bc2e0774062 100644 --- a/drivers/hid/hid-picolcd.c +++ b/drivers/hid/hid-picolcd.c @@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info) ref_cnt--; mutex_lock(&info->lock); (*ref_cnt)--; - may_release = !ref_cnt; + may_release = !*ref_cnt; mutex_unlock(&info->lock); if (may_release) { - framebuffer_release(info); vfree((u8 *)info->fix.smem_start); + framebuffer_release(info); } } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 254a003af04..0a29c51114a 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file) { struct hiddev_list *list; struct usb_interface *intf; + struct hid_device *hid; struct hiddev *hiddev; int res; intf = usb_find_interface(&hiddev_driver, iminor(inode)); if (!intf) return -ENODEV; - hiddev = usb_get_intfdata(intf); + hid = usb_get_intfdata(intf); + hiddev = hid->hiddev; if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL))) return -ENOMEM; @@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct hiddev_list *list = file->private_data; struct hiddev *hiddev = list->hiddev; struct hid_device *hid = hiddev->hid; - struct usb_device *dev = hid_to_usb_dev(hid); + struct usb_device *dev; struct hiddev_collection_info cinfo; struct hiddev_report_info rinfo; struct hiddev_field_info finfo; @@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) /* Called without BKL by compat methods so no BKL taken */ /* FIXME: Who or what stop this racing with a disconnect ?? */ - if (!hiddev->exist) + if (!hiddev->exist || !hid) return -EIO; + dev = hid_to_usb_dev(hid); + switch (cmd) { case HIDIOCGVERSION: @@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force) hid->hiddev = hiddev; hiddev->hid = hid; hiddev->exist = 1; - usb_set_intfdata(usbhid->intf, usbhid); retval = usb_register_dev(usbhid->intf, &hiddev_class); if (retval) { err_hid("Not able to get a minor for this device."); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index f3adf18bfa0..4d4d09bdec0 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -332,11 +332,11 @@ config SENSORS_F71805F will be called f71805f. 
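The picolcd hunk a little earlier fixes two related teardown bugs: `may_release = !ref_cnt` tested the pointer rather than the count it points to (so it could never be true), and `framebuffer_release(info)` freed the info structure before `info->fix.smem_start` was read for vfree(), a use-after-free. A standalone sketch of the corrected pattern (the structure and field names are simplified stand-ins, not the fbdev API):

#include <stdio.h>
#include <stdlib.h>

struct fb_info {
        int  *ref_cnt;   /* open count shared with the rest of the driver */
        void *pixels;    /* backing memory reached through the info struct */
};

static void fb_destroy(struct fb_info *info)
{
        int *ref_cnt = info->ref_cnt;
        int may_release;

        (*ref_cnt)--;
        may_release = !*ref_cnt;       /* test the count, not the pointer */

        if (may_release) {
                free(info->pixels);    /* free what we reach through info first ... */
                free(info);            /* ... then release the info structure itself */
        }
}

int main(void)
{
        int open_count = 1;
        struct fb_info *info = malloc(sizeof(*info));

        info->ref_cnt = &open_count;
        info->pixels = malloc(64);
        fb_destroy(info);
        printf("open count is now %d\n", open_count);
        return 0;
}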
config SENSORS_F71882FG - tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000" + tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000" depends on EXPERIMENTAL help - If you say yes here you get support for hardware monitoring features - of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG, + If you say yes here you get support for hardware monitoring + features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG, F71889FG and F8000 Super-I/O chips. This driver can also be built as a module. If so, the module @@ -465,6 +465,7 @@ config SENSORS_JZ4740 config SENSORS_JC42 tristate "JEDEC JC42.4 compliant temperature sensors" + depends on I2C help If you say yes here you get support for Jedec JC42.4 compliant temperature sensors. Support will include, but not be limited to, @@ -711,7 +712,8 @@ config SENSORS_PC87427 functions of the National Semiconductor PC87427 Super-I/O chip. The chip has two distinct logical devices, one for fan speed monitoring and control, and one for voltage and temperature - monitoring. Only fan speed monitoring is supported right now. + monitoring. Fan speed monitoring and control are supported, as + well as temperature monitoring. Voltages aren't supported yet. This driver can also be built as a module. If so, the module will be called pc87427. @@ -804,6 +806,16 @@ config SENSORS_EMC1403 Threshold values can be configured using sysfs. Data from the different diodes are accessible via sysfs. +config SENSORS_EMC2103 + tristate "SMSC EMC2103" + depends on I2C + help + If you say yes here you get support for the temperature + and fan sensors of the SMSC EMC2103 chips. + + This driver can also be built as a module. If so, the module + will be called emc2103. + config SENSORS_SMSC47M1 tristate "SMSC LPC47M10x and compatibles" help diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 13d913e34db..e3c2484f6c5 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_SENSORS_PKGTEMP) += pkgtemp.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o +obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o obj-$(CONFIG_SENSORS_F71805F) += f71805f.o obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o obj-$(CONFIG_SENSORS_F75375S) += f75375s.o diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index b300a2048af..52319340e18 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c @@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = { static int __devinit ads7871_probe(struct spi_device *spi) { - int status, ret, err = 0; + int ret, err; uint8_t val; struct ads7871_data *pdata; dev_dbg(&spi->dev, "probe\n"); - pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); - if (!pdata) { - err = -ENOMEM; - goto exit; - } - - status = sysfs_create_group(&spi->dev.kobj, &ads7871_group); - if (status < 0) - goto error_free; - - pdata->hwmon_dev = hwmon_device_register(&spi->dev); - if (IS_ERR(pdata->hwmon_dev)) { - err = PTR_ERR(pdata->hwmon_dev); - goto error_remove; - } - - spi_set_drvdata(spi, pdata); - /* Configure the SPI bus */ spi->mode = (SPI_MODE_0); spi->bits_per_word = 8; @@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi) we need to make sure we really have a chip*/ if (val != ret) { err = -ENODEV; + goto exit; + } + + pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); + if (!pdata) { + err = -ENOMEM; + goto exit; + } + + err = 
sysfs_create_group(&spi->dev.kobj, &ads7871_group); + if (err < 0) + goto error_free; + + spi_set_drvdata(spi, pdata); + + pdata->hwmon_dev = hwmon_device_register(&spi->dev); + if (IS_ERR(pdata->hwmon_dev)) { + err = PTR_ERR(pdata->hwmon_dev); goto error_remove; } diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c index 3b973f30b1f..89b4f3babe8 100644 --- a/drivers/hwmon/asc7621.c +++ b/drivers/hwmon/asc7621.c @@ -1150,9 +1150,6 @@ static int asc7621_detect(struct i2c_client *client, { struct i2c_adapter *adapter = client->adapter; int company, verstep, chip_index; - struct device *dev; - - dev = &client->dev; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; @@ -1169,13 +1166,11 @@ static int asc7621_detect(struct i2c_client *client, if (company == asc7621_chips[chip_index].company_id && verstep == asc7621_chips[chip_index].verstep_id) { - strlcpy(client->name, asc7621_chips[chip_index].name, - I2C_NAME_SIZE); strlcpy(info->type, asc7621_chips[chip_index].name, I2C_NAME_SIZE); - dev_info(&adapter->dev, "Matched %s\n", - asc7621_chips[chip_index].name); + dev_info(&adapter->dev, "Matched %s at 0x%02x\n", + asc7621_chips[chip_index].name, client->addr); return 0; } } diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index c070c9714cb..de8111114f4 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -518,7 +518,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = { static int __init coretemp_init(void) { int i, err = -ENODEV; - struct pdev_entry *p, *n; /* quick check if we run Intel */ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 0e4b5642638..5b58b20dead 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -89,6 +89,35 @@ static ssize_t store_temp(struct device *dev, return count; } +static ssize_t store_bit(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct thermal_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); + unsigned long val; + int retval; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + + mutex_lock(&data->mutex); + retval = i2c_smbus_read_byte_data(client, sda->nr); + if (retval < 0) + goto fail; + + retval &= ~sda->index; + if (val) + retval |= sda->index; + + retval = i2c_smbus_write_byte_data(client, sda->index, retval); + if (retval == 0) + retval = count; +fail: + mutex_unlock(&data->mutex); + return retval; +} + static ssize_t show_hyst(struct device *dev, struct device_attribute *attr, char *buf) { @@ -200,6 +229,9 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_hyst, store_hyst, 0x1A); +static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR, + show_bit, store_bit, 0x03, 0x40); + static struct attribute *mid_att_thermal[] = { &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, @@ -225,6 +257,7 @@ static struct attribute *mid_att_thermal[] = { &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, + &sensor_dev_attr_power_state.dev_attr.attr, NULL }; diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c new file mode 100644 index 00000000000..af914ad93ec --- /dev/null +++ b/drivers/hwmon/emc2103.c @@ -0,0 +1,740 @@ +/* + 
emc2103.c - Support for SMSC EMC2103 + Copyright (c) 2010 SMSC + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> + +/* Addresses scanned */ +static const unsigned short normal_i2c[] = { 0x2E, I2C_CLIENT_END }; + +static const u8 REG_TEMP[4] = { 0x00, 0x02, 0x04, 0x06 }; +static const u8 REG_TEMP_MIN[4] = { 0x3c, 0x38, 0x39, 0x3a }; +static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 }; + +#define REG_CONF1 0x20 +#define REG_TEMP_MAX_ALARM 0x24 +#define REG_TEMP_MIN_ALARM 0x25 +#define REG_FAN_CONF1 0x42 +#define REG_FAN_TARGET_LO 0x4c +#define REG_FAN_TARGET_HI 0x4d +#define REG_FAN_TACH_HI 0x4e +#define REG_FAN_TACH_LO 0x4f +#define REG_PRODUCT_ID 0xfd +#define REG_MFG_ID 0xfe + +/* equation 4 from datasheet: rpm = (3932160 * multipler) / count */ +#define FAN_RPM_FACTOR 3932160 + +/* 2103-2 and 2103-4's 3rd temperature sensor can be connected to two diodes + * in anti-parallel mode, and in this configuration both can be read + * independently (so we have 4 temperature inputs). The device can't + * detect if it's connected in this mode, so we have to manually enable + * it. Default is to leave the device in the state it's already in (-1). 
+ * This parameter allows APD mode to be optionally forced on or off */ +static int apd = -1; +module_param(apd, bool, 0); +MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode"); + +struct temperature { + s8 degrees; + u8 fraction; /* 0-7 multiples of 0.125 */ +}; + +struct emc2103_data { + struct device *hwmon_dev; + struct mutex update_lock; + bool valid; /* registers are valid */ + bool fan_rpm_control; + int temp_count; /* num of temp sensors */ + unsigned long last_updated; /* in jiffies */ + struct temperature temp[4]; /* internal + 3 external */ + s8 temp_min[4]; /* no fractional part */ + s8 temp_max[4]; /* no fractional part */ + u8 temp_min_alarm; + u8 temp_max_alarm; + u8 fan_multiplier; + u16 fan_tach; + u16 fan_target; +}; + +static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output) +{ + int status = i2c_smbus_read_byte_data(client, i2c_reg); + if (status < 0) { + dev_warn(&client->dev, "reg 0x%02x, err %d\n", + i2c_reg, status); + } else { + *output = status; + } + return status; +} + +static void read_temp_from_i2c(struct i2c_client *client, u8 i2c_reg, + struct temperature *temp) +{ + u8 degrees, fractional; + + if (read_u8_from_i2c(client, i2c_reg, &degrees) < 0) + return; + + if (read_u8_from_i2c(client, i2c_reg + 1, &fractional) < 0) + return; + + temp->degrees = degrees; + temp->fraction = (fractional & 0xe0) >> 5; +} + +static void read_fan_from_i2c(struct i2c_client *client, u16 *output, + u8 hi_addr, u8 lo_addr) +{ + u8 high_byte, lo_byte; + + if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0) + return; + + if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0) + return; + + *output = ((u16)high_byte << 5) | (lo_byte >> 3); +} + +static void write_fan_target_to_i2c(struct i2c_client *client, u16 new_target) +{ + u8 high_byte = (new_target & 0x1fe0) >> 5; + u8 low_byte = (new_target & 0x001f) << 3; + i2c_smbus_write_byte_data(client, REG_FAN_TARGET_LO, low_byte); + i2c_smbus_write_byte_data(client, REG_FAN_TARGET_HI, high_byte); +} + +static void read_fan_config_from_i2c(struct i2c_client *client) + +{ + struct emc2103_data *data = i2c_get_clientdata(client); + u8 conf1; + + if (read_u8_from_i2c(client, REG_FAN_CONF1, &conf1) < 0) + return; + + data->fan_multiplier = 1 << ((conf1 & 0x60) >> 5); + data->fan_rpm_control = (conf1 & 0x80) != 0; +} + +static struct emc2103_data *emc2103_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc2103_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ + HZ / 2) + || !data->valid) { + int i; + + for (i = 0; i < data->temp_count; i++) { + read_temp_from_i2c(client, REG_TEMP[i], &data->temp[i]); + read_u8_from_i2c(client, REG_TEMP_MIN[i], + &data->temp_min[i]); + read_u8_from_i2c(client, REG_TEMP_MAX[i], + &data->temp_max[i]); + } + + read_u8_from_i2c(client, REG_TEMP_MIN_ALARM, + &data->temp_min_alarm); + read_u8_from_i2c(client, REG_TEMP_MAX_ALARM, + &data->temp_max_alarm); + + read_fan_from_i2c(client, &data->fan_tach, + REG_FAN_TACH_HI, REG_FAN_TACH_LO); + read_fan_from_i2c(client, &data->fan_target, + REG_FAN_TARGET_HI, REG_FAN_TARGET_LO); + read_fan_config_from_i2c(client); + + data->last_updated = jiffies; + data->valid = true; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +static ssize_t +show_temp(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data =
emc2103_update_device(dev); + int millidegrees = data->temp[nr].degrees * 1000 + + data->temp[nr].fraction * 125; + return sprintf(buf, "%d\n", millidegrees); +} + +static ssize_t +show_temp_min(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data = emc2103_update_device(dev); + int millidegrees = data->temp_min[nr] * 1000; + return sprintf(buf, "%d\n", millidegrees); +} + +static ssize_t +show_temp_max(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data = emc2103_update_device(dev); + int millidegrees = data->temp_max[nr] * 1000; + return sprintf(buf, "%d\n", millidegrees); +} + +static ssize_t +show_temp_fault(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data = emc2103_update_device(dev); + bool fault = (data->temp[nr].degrees == -128); + return sprintf(buf, "%d\n", fault ? 1 : 0); +} + +static ssize_t +show_temp_min_alarm(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data = emc2103_update_device(dev); + bool alarm = data->temp_min_alarm & (1 << nr); + return sprintf(buf, "%d\n", alarm ? 1 : 0); +} + +static ssize_t +show_temp_max_alarm(struct device *dev, struct device_attribute *da, char *buf) +{ + int nr = to_sensor_dev_attr(da)->index; + struct emc2103_data *data = emc2103_update_device(dev); + bool alarm = data->temp_max_alarm & (1 << nr); + return sprintf(buf, "%d\n", alarm ? 1 : 0); +} + +static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + int nr = to_sensor_dev_attr(da)->index; + struct i2c_client *client = to_i2c_client(dev); + struct emc2103_data *data = i2c_get_clientdata(client); + long val; + + int result = strict_strtol(buf, 10, &val); + if (result < 0) + return -EINVAL; + + val = DIV_ROUND_CLOSEST(val, 1000); + if ((val < -63) || (val > 127)) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->temp_min[nr] = val; + i2c_smbus_write_byte_data(client, REG_TEMP_MIN[nr], val); + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + int nr = to_sensor_dev_attr(da)->index; + struct i2c_client *client = to_i2c_client(dev); + struct emc2103_data *data = i2c_get_clientdata(client); + long val; + + int result = strict_strtol(buf, 10, &val); + if (result < 0) + return -EINVAL; + + val = DIV_ROUND_CLOSEST(val, 1000); + if ((val < -63) || (val > 127)) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->temp_max[nr] = val; + i2c_smbus_write_byte_data(client, REG_TEMP_MAX[nr], val); + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t +show_fan(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2103_data *data = emc2103_update_device(dev); + int rpm = 0; + if (data->fan_tach != 0) + rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach; + return sprintf(buf, "%d\n", rpm); +} + +static ssize_t +show_fan_div(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2103_data *data = emc2103_update_device(dev); + int fan_div = 8 / data->fan_multiplier; + return sprintf(buf, "%d\n", fan_div); +} + +/* Note: we also update the fan target here, because its value is + determined in part by the fan clock 
divider. This follows the principle + of least surprise; the user doesn't expect the fan target to change just + because the divider changed. */ +static ssize_t set_fan_div(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct emc2103_data *data = emc2103_update_device(dev); + struct i2c_client *client = to_i2c_client(dev); + int new_range_bits, old_div = 8 / data->fan_multiplier; + long new_div; + + int status = strict_strtol(buf, 10, &new_div); + if (status < 0) + return -EINVAL; + + if (new_div == old_div) /* No change */ + return count; + + switch (new_div) { + case 1: + new_range_bits = 3; + break; + case 2: + new_range_bits = 2; + break; + case 4: + new_range_bits = 1; + break; + case 8: + new_range_bits = 0; + break; + default: + return -EINVAL; + } + + mutex_lock(&data->update_lock); + + status = i2c_smbus_read_byte_data(client, REG_FAN_CONF1); + if (status < 0) { + dev_dbg(&client->dev, "reg 0x%02x, err %d\n", + REG_FAN_CONF1, status); + mutex_unlock(&data->update_lock); + return -EIO; + } + status &= 0x9F; + status |= (new_range_bits << 5); + i2c_smbus_write_byte_data(client, REG_FAN_CONF1, status); + + data->fan_multiplier = 8 / new_div; + + /* update fan target if high byte is not disabled */ + if ((data->fan_target & 0x1fe0) != 0x1fe0) { + u16 new_target = (data->fan_target * old_div) / new_div; + data->fan_target = min(new_target, (u16)0x1fff); + write_fan_target_to_i2c(client, data->fan_target); + } + + /* invalidate data to force re-read from hardware */ + data->valid = false; + + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t +show_fan_target(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2103_data *data = emc2103_update_device(dev); + int rpm = 0; + + /* high byte of 0xff indicates disabled so return 0 */ + if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0)) + rpm = (FAN_RPM_FACTOR * data->fan_multiplier) + / data->fan_target; + + return sprintf(buf, "%d\n", rpm); +} + +static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct emc2103_data *data = emc2103_update_device(dev); + struct i2c_client *client = to_i2c_client(dev); + long rpm_target; + + int result = strict_strtol(buf, 10, &rpm_target); + if (result < 0) + return -EINVAL; + + /* Datasheet states 16384 as maximum RPM target (table 3.2) */ + if ((rpm_target < 0) || (rpm_target > 16384)) + return -EINVAL; + + mutex_lock(&data->update_lock); + + if (rpm_target == 0) + data->fan_target = 0x1fff; + else + data->fan_target = SENSORS_LIMIT( + (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target, + 0, 0x1fff); + + write_fan_target_to_i2c(client, data->fan_target); + + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t +show_fan_fault(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2103_data *data = emc2103_update_device(dev); + bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0); + return sprintf(buf, "%d\n", fault ? 1 : 0); +} + +static ssize_t +show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf) +{ + struct emc2103_data *data = emc2103_update_device(dev); + return sprintf(buf, "%d\n", data->fan_rpm_control ? 
3 : 0); +} + +static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc2103_data *data = i2c_get_clientdata(client); + long new_value; + u8 conf_reg; + + int result = strict_strtol(buf, 10, &new_value); + if (result < 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + switch (new_value) { + case 0: + data->fan_rpm_control = false; + break; + case 3: + data->fan_rpm_control = true; + break; + default: + mutex_unlock(&data->update_lock); + return -EINVAL; + } + + read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); + + if (data->fan_rpm_control) + conf_reg |= 0x80; + else + conf_reg &= ~0x80; + + i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg); + + mutex_unlock(&data->update_lock); + return count; +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp_min, + set_temp_min, 0); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, + set_temp_max, 0); +static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm, + NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm, + NULL, 0); + +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp_min, + set_temp_min, 1); +static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, + set_temp_max, 1); +static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm, + NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm, + NULL, 1); + +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp_min, + set_temp_min, 2); +static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, + set_temp_max, 2); +static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm, + NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm, + NULL, 2); + +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp_min, + set_temp_min, 3); +static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, + set_temp_max, 3); +static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm, + NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm, + NULL, 3); + +static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL); +static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div); +static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target, + set_fan_target); +static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL); + +static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable, + set_pwm_enable); + +/* sensors present on all models */ +static struct attribute *emc2103_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_fault.dev_attr.attr, + 
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_fault.dev_attr.attr, + &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &dev_attr_fan1_input.attr, + &dev_attr_fan1_div.attr, + &dev_attr_fan1_target.attr, + &dev_attr_fan1_fault.attr, + &dev_attr_pwm1_enable.attr, + NULL +}; + +/* extra temperature sensors only present on 2103-2 and 2103-4 */ +static struct attribute *emc2103_attributes_temp3[] = { + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp3_fault.dev_attr.attr, + &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, + NULL +}; + +/* extra temperature sensors only present on 2103-2 and 2103-4 in APD mode */ +static struct attribute *emc2103_attributes_temp4[] = { + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_min.dev_attr.attr, + &sensor_dev_attr_temp4_max.dev_attr.attr, + &sensor_dev_attr_temp4_fault.dev_attr.attr, + &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group emc2103_group = { + .attrs = emc2103_attributes, +}; + +static const struct attribute_group emc2103_temp3_group = { + .attrs = emc2103_attributes_temp3, +}; + +static const struct attribute_group emc2103_temp4_group = { + .attrs = emc2103_attributes_temp4, +}; + +static int +emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct emc2103_data *data; + int status; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + data = kzalloc(sizeof(struct emc2103_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* 2103-2 and 2103-4 have 3 external diodes, 2103-1 has 1 */ + status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID); + if (status == 0x24) { + /* 2103-1 only has 1 external diode */ + data->temp_count = 2; + } else { + /* 2103-2 and 2103-4 have 3 or 4 external diodes */ + status = i2c_smbus_read_byte_data(client, REG_CONF1); + if (status < 0) { + dev_dbg(&client->dev, "reg 0x%02x, err %d\n", REG_CONF1, + status); + goto exit_free; + } + + /* detect current state of hardware */ + data->temp_count = (status & 0x01) ? 
4 : 3; + + /* force APD state if module parameter is set */ + if (apd == 0) { + /* force APD mode off */ + data->temp_count = 3; + status &= ~(0x01); + i2c_smbus_write_byte_data(client, REG_CONF1, status); + } else if (apd == 1) { + /* force APD mode on */ + data->temp_count = 4; + status |= 0x01; + i2c_smbus_write_byte_data(client, REG_CONF1, status); + } + } + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &emc2103_group); + if (status) + goto exit_free; + + if (data->temp_count >= 3) { + status = sysfs_create_group(&client->dev.kobj, + &emc2103_temp3_group); + if (status) + goto exit_remove; + } + + if (data->temp_count == 4) { + status = sysfs_create_group(&client->dev.kobj, + &emc2103_temp4_group); + if (status) + goto exit_remove_temp3; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + status = PTR_ERR(data->hwmon_dev); + goto exit_remove_temp4; + } + + dev_info(&client->dev, "%s: sensor '%s'\n", + dev_name(data->hwmon_dev), client->name); + + return 0; + +exit_remove_temp4: + if (data->temp_count == 4) + sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group); +exit_remove_temp3: + if (data->temp_count >= 3) + sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group); +exit_remove: + sysfs_remove_group(&client->dev.kobj, &emc2103_group); +exit_free: + kfree(data); + return status; +} + +static int emc2103_remove(struct i2c_client *client) +{ + struct emc2103_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + + if (data->temp_count == 4) + sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group); + + if (data->temp_count >= 3) + sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group); + + sysfs_remove_group(&client->dev.kobj, &emc2103_group); + + kfree(data); + return 0; +} + +static const struct i2c_device_id emc2103_ids[] = { + { "emc2103", 0, }, + { /* LIST END */ } +}; +MODULE_DEVICE_TABLE(i2c, emc2103_ids); + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int +emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = new_client->adapter; + int manufacturer, product; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + manufacturer = i2c_smbus_read_byte_data(new_client, REG_MFG_ID); + if (manufacturer != 0x5D) + return -ENODEV; + + product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID); + if ((product != 0x24) && (product != 0x26)) + return -ENODEV; + + strlcpy(info->type, "emc2103", I2C_NAME_SIZE); + + return 0; +} + +static struct i2c_driver emc2103_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "emc2103", + }, + .probe = emc2103_probe, + .remove = emc2103_remove, + .id_table = emc2103_ids, + .detect = emc2103_detect, + .address_list = normal_i2c, +}; + +static int __init sensors_emc2103_init(void) +{ + return i2c_add_driver(&emc2103_driver); +} + +static void __exit sensors_emc2103_exit(void) +{ + i2c_del_driver(&emc2103_driver); +} + +MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>"); +MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_emc2103_init); +module_exit(sensors_emc2103_exit); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 6207120dcd4..537841ef44b 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -45,7 +45,6 @@ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 
0x1934 /* Manufacturers ID */ -#define SIO_F71808_ID 0x0901 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ @@ -97,10 +96,9 @@ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; +enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; static const char *f71882fg_names[] = { - "f71808fg", "f71858fg", "f71862fg", "f71882fg", @@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; -/* In attr common to the f71862fg, f71882fg and f71889fg */ -static struct sensor_device_attribute_2 fxxxx_in_attr[] = { +/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ +static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), @@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = { SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), -}; - -/* In attr for the f71808fg */ -static struct sensor_device_attribute_2 f71808_in_attr[] = { - SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), - SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), - SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), - SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), - SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), - SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), - SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7), - SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8), -}; - -/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */ -static struct sensor_device_attribute_2 fxxxx_temp_attr[] = { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), @@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = { store_temp_beep, 0, 6), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), -}; - -/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ -static struct sensor_device_attribute_2 f71862_temp_attr[] = { SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 3), @@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->temp_type[1] = 6; break; } - } else if (data->type == f71808fg) { - reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); - data->temp_type[1] = (reg & 0x02) ? 2 : 4; - data->temp_type[2] = (reg & 0x04) ? 2 : 4; - } else { reg2 = f71882fg_read8(data, F71882FG_REG_PECI); if ((reg2 & 0x03) == 0x01) @@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, val /= 1000; - if (data->type == f71889fg - || data->type == f71808fg) + if (data->type == f71889fg) val = SENSORS_LIMIT(val, -128, 127); else val = SENSORS_LIMIT(val, 0, 127); @@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) /* fall through! 
*/ case f71862fg: err = f71882fg_create_sysfs_files(pdev, - f71862_temp_attr, - ARRAY_SIZE(f71862_temp_attr)); - if (err) - goto exit_unregister_sysfs; - err = f71882fg_create_sysfs_files(pdev, - fxxxx_in_attr, - ARRAY_SIZE(fxxxx_in_attr)); - if (err) - goto exit_unregister_sysfs; - err = f71882fg_create_sysfs_files(pdev, - fxxxx_temp_attr, - ARRAY_SIZE(fxxxx_temp_attr)); - break; - case f71808fg: - err = f71882fg_create_sysfs_files(pdev, - f71808_in_attr, - ARRAY_SIZE(f71808_in_attr)); - if (err) - goto exit_unregister_sysfs; - err = f71882fg_create_sysfs_files(pdev, - fxxxx_temp_attr, - ARRAY_SIZE(fxxxx_temp_attr)); + fxxxx_in_temp_attr, + ARRAY_SIZE(fxxxx_in_temp_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, @@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) case f71862fg: err = (data->pwm_enable & 0x15) != 0x15; break; - case f71808fg: case f71882fg: case f71889fg: err = 0; @@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; - case f71808fg: case f71889fg: for (i = 0; i < nr_fans; i++) { data->pwm_auto_point_mapping[i] = @@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev) /* fall through! */ case f71862fg: f71882fg_remove_sysfs_files(pdev, - f71862_temp_attr, - ARRAY_SIZE(f71862_temp_attr)); - f71882fg_remove_sysfs_files(pdev, - fxxxx_in_attr, - ARRAY_SIZE(fxxxx_in_attr)); - f71882fg_remove_sysfs_files(pdev, - fxxxx_temp_attr, - ARRAY_SIZE(fxxxx_temp_attr)); - break; - case f71808fg: - f71882fg_remove_sysfs_files(pdev, - f71808_in_attr, - ARRAY_SIZE(f71808_in_attr)); - f71882fg_remove_sysfs_files(pdev, - fxxxx_temp_attr, - ARRAY_SIZE(fxxxx_temp_attr)); + fxxxx_in_temp_attr, + ARRAY_SIZE(fxxxx_in_temp_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, @@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, devid = force_id ? 
force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { - case SIO_F71808_ID: - sio_data->type = f71808fg; - break; case SIO_F71858_ID: sio_data->type = f71858fg; break; diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 25763d2223b..f7701295937 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -259,6 +259,7 @@ struct it87_sio_data { u8 revision; u8 vid_value; u8 beep_pin; + u8 internal; /* Internal sensors can be labeled */ /* Features skipped based on config or DMI */ u8 skip_vid; u8 skip_fan; @@ -1194,6 +1195,22 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL); +static ssize_t show_label(struct device *dev, struct device_attribute *attr, + char *buf) +{ + static const char *labels[] = { + "+5V", + "5VSB", + "Vbat", + }; + int nr = to_sensor_dev_attr(attr)->index; + + return sprintf(buf, "%s\n", labels[nr]); +} +static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0); +static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1); +static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2); + static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -1434,6 +1451,17 @@ static const struct attribute_group it87_group_vid = { .attrs = it87_attributes_vid, }; +static struct attribute *it87_attributes_label[] = { + &sensor_dev_attr_in3_label.dev_attr.attr, + &sensor_dev_attr_in7_label.dev_attr.attr, + &sensor_dev_attr_in8_label.dev_attr.attr, + NULL +}; + +static const struct attribute_group it87_group_label = { + .attrs = it87_attributes_vid, +}; + /* SuperIO detection - will change isa_address if a chip is found */ static int __init it87_find(unsigned short *address, struct it87_sio_data *sio_data) @@ -1487,6 +1515,9 @@ static int __init it87_find(unsigned short *address, pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n", chip_type, *address, sio_data->revision); + /* in8 (Vbat) is always internal */ + sio_data->internal = (1 << 2); + /* Read GPIO config and VID value from LDN 7 (GPIO) */ if (sio_data->type == it87) { /* The IT8705F doesn't have VID pins at all */ @@ -1540,9 +1571,9 @@ static int __init it87_find(unsigned short *address, pr_notice("it87: Routing internal VCCH to in7\n"); } if (reg & (1 << 0)) - pr_info("it87: in3 is VCC (+5V)\n"); + sio_data->internal |= (1 << 0); if (reg & (1 << 1)) - pr_info("it87: in7 is VCCH (+5V Stand-By)\n"); + sio_data->internal |= (1 << 1); sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f; } @@ -1600,6 +1631,7 @@ static void it87_remove_files(struct device *dev) } if (!sio_data->skip_vid) sysfs_remove_group(&dev->kobj, &it87_group_vid); + sysfs_remove_group(&dev->kobj, &it87_group_label); } static int __devinit it87_probe(struct platform_device *pdev) @@ -1725,6 +1757,16 @@ static int __devinit it87_probe(struct platform_device *pdev) goto ERROR4; } + /* Export labels for internal sensors */ + for (i = 0; i < 3; i++) { + if (!(sio_data->internal & (1 << i))) + continue; + err = sysfs_create_file(&dev->kobj, + it87_attributes_label[i]); + if (err) + goto ERROR4; + } + data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 8bdf80d9159..39ead2a4d3c 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = { MODULE_DEVICE_TABLE(pci, k8temp_ids); 
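Back to the emc2103 driver added above: its fan path packs a 13-bit tachometer count out of the HIGH/LOW register pair and then applies the datasheet relation rpm = (3932160 * multiplier) / count. The standalone sketch below, with made-up register bytes, is plain C for checking that arithmetic rather than driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define FAN_RPM_FACTOR 3932160

	/* HIGH holds bits 12..5 of the count, LOW holds bits 4..0 in its
	 * top five bits -- the same packing read_fan_from_i2c() undoes. */
	static uint16_t tach_from_regs(uint8_t hi, uint8_t lo)
	{
		return ((uint16_t)hi << 5) | (lo >> 3);
	}

	static unsigned int tach_to_rpm(uint16_t tach, unsigned int multiplier)
	{
		return tach ? (FAN_RPM_FACTOR * multiplier) / tach : 0;
	}

	int main(void)
	{
		/* hypothetical register values: HIGH = 0x3e, LOW = 0x40 */
		uint16_t tach = tach_from_regs(0x3e, 0x40);

		/* prints "count 1992 -> 1973 RPM" with multiplier 1 */
		printf("count %u -> %u RPM\n", (unsigned)tach, tach_to_rpm(tach, 1));
		return 0;
	}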
+static int __devinit is_rev_g_desktop(u8 model) +{ + u32 brandidx; + + if (model < 0x69) + return 0; + + if (model == 0xc1 || model == 0x6c || model == 0x7c) + return 0; + + /* + * Differentiate between AM2 and ASB1. + * See "Constructing the processor Name String" in "Revision + * Guide for AMD NPT Family 0Fh Processors" (33610). + */ + brandidx = cpuid_ebx(0x80000001); + brandidx = (brandidx >> 9) & 0x1f; + + /* Single core */ + if ((model == 0x6f || model == 0x7f) && + (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc)) + return 0; + + /* Dual core */ + if (model == 0x6b && + (brandidx == 0xb || brandidx == 0xc)) + return 0; + + return 1; +} + static int __devinit k8temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev, "wrong - check erratum #141\n"); } - if ((model >= 0x69) && - !(model == 0xc1 || model == 0x6c || model == 0x7c || - model == 0x6b || model == 0x6f || model == 0x7f)) { + if (is_rev_g_desktop(model)) { /* * RevG desktop CPUs (i.e. no socket S1G1 or * ASB1 parts) need additional offset, @@ -252,12 +281,13 @@ static int __devinit k8temp_probe(struct pci_dev *pdev, &sensor_dev_attr_temp3_input.dev_attr); if (err) goto exit_remove; - if (data->sensorsp & SEL_PLACE) + if (data->sensorsp & SEL_PLACE) { err = device_create_file(&pdev->dev, &sensor_dev_attr_temp4_input. dev_attr); if (err) goto exit_remove; + } } err = device_create_file(&pdev->dev, &dev_attr_name); diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 393f354f92a..ab5b87a8167 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -280,10 +280,49 @@ static int lm75_detect(struct i2c_client *new_client, return 0; } +#ifdef CONFIG_PM +static int lm75_suspend(struct device *dev) +{ + int status; + struct i2c_client *client = to_i2c_client(dev); + status = lm75_read_value(client, LM75_REG_CONF); + if (status < 0) { + dev_dbg(&client->dev, "Can't read config? %d\n", status); + return status; + } + status = status | LM75_SHUTDOWN; + lm75_write_value(client, LM75_REG_CONF, status); + return 0; +} + +static int lm75_resume(struct device *dev) +{ + int status; + struct i2c_client *client = to_i2c_client(dev); + status = lm75_read_value(client, LM75_REG_CONF); + if (status < 0) { + dev_dbg(&client->dev, "Can't read config? 
%d\n", status); + return status; + } + status = status & ~LM75_SHUTDOWN; + lm75_write_value(client, LM75_REG_CONF, status); + return 0; +} + +static const struct dev_pm_ops lm75_dev_pm_ops = { + .suspend = lm75_suspend, + .resume = lm75_resume, +}; +#define LM75_DEV_PM_OPS (&lm75_dev_pm_ops) +#else +#define LM75_DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + static struct i2c_driver lm75_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm75", + .pm = LM75_DEV_PM_OPS, }, .probe = lm75_probe, .remove = lm75_remove, diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index 7c93454bb4e..e547a3eb4de 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h @@ -30,6 +30,7 @@ /* straight from the datasheet */ #define LM75_TEMP_MIN (-55000) #define LM75_TEMP_MAX 125000 +#define LM75_SHUTDOWN 0x01 /* TEMP: 0.001C/bit (-55C to +125C) REG: (0.5C/bit, two's complement) << 7 */ diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index 21d201befc2..65930832930 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c @@ -21,6 +21,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <linux/i2c/ltc4245.h> /* Here are names of the chip's registers (a.k.a. commands) */ enum ltc4245_cmd { @@ -60,8 +61,72 @@ struct ltc4245_data { /* Voltage registers */ u8 vregs[0x0d]; + + /* GPIO ADC registers */ + bool use_extra_gpios; + int gpios[3]; }; +/* + * Update the readings from the GPIO pins. If the driver has been configured to + * sample all GPIO's as analog voltages, a round-robin sampling method is used. + * Otherwise, only the configured GPIO pin is sampled. + * + * LOCKING: must hold data->update_lock + */ +static void ltc4245_update_gpios(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ltc4245_data *data = i2c_get_clientdata(client); + u8 gpio_curr, gpio_next, gpio_reg; + int i; + + /* no extra gpio support, we're basically done */ + if (!data->use_extra_gpios) { + data->gpios[0] = data->vregs[LTC4245_GPIOADC - 0x10]; + return; + } + + /* + * If the last reading was too long ago, then we mark all old GPIO + * readings as stale by setting them to -EAGAIN + */ + if (time_after(jiffies, data->last_updated + 5 * HZ)) { + dev_dbg(&client->dev, "Marking GPIOs invalid\n"); + for (i = 0; i < ARRAY_SIZE(data->gpios); i++) + data->gpios[i] = -EAGAIN; + } + + /* + * Get the current GPIO pin + * + * The datasheet calls these GPIO[1-3], but we'll calculate the zero + * based array index instead, and call them GPIO[0-2]. This is much + * easier to think about. 
+ */ + gpio_curr = (data->cregs[LTC4245_GPIO] & 0xc0) >> 6; + if (gpio_curr > 0) + gpio_curr -= 1; + + /* Read the GPIO voltage from the GPIOADC register */ + data->gpios[gpio_curr] = data->vregs[LTC4245_GPIOADC - 0x10]; + + /* Find the next GPIO pin to read */ + gpio_next = (gpio_curr + 1) % ARRAY_SIZE(data->gpios); + + /* + * Calculate the correct setting for the GPIO register so it will + * sample the next GPIO pin + */ + gpio_reg = (data->cregs[LTC4245_GPIO] & 0x3f) | ((gpio_next + 1) << 6); + + /* Update the GPIO register */ + i2c_smbus_write_byte_data(client, LTC4245_GPIO, gpio_reg); + + /* Update saved data */ + data->cregs[LTC4245_GPIO] = gpio_reg; +} + static struct ltc4245_data *ltc4245_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -93,6 +158,9 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev) data->vregs[i] = val; } + /* Update GPIO readings */ + ltc4245_update_gpios(dev); + data->last_updated = jiffies; data->valid = 1; } @@ -233,6 +301,22 @@ static ssize_t ltc4245_show_alarm(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0); } +static ssize_t ltc4245_show_gpio(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ltc4245_data *data = ltc4245_update_device(dev); + int val = data->gpios[attr->index]; + + /* handle stale GPIO's */ + if (val < 0) + return val; + + /* Convert to millivolts and print */ + return snprintf(buf, PAGE_SIZE, "%u\n", val * 10); +} + /* These macros are used below in constructing device attribute objects * for use with sysfs_create_group() to make a sysfs device file * for each register. @@ -254,6 +338,10 @@ static ssize_t ltc4245_show_alarm(struct device *dev, static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \ ltc4245_show_alarm, NULL, (mask), reg) +#define LTC4245_GPIO_VOLTAGE(name, gpio_num) \ + static SENSOR_DEVICE_ATTR(name, S_IRUGO, \ + ltc4245_show_gpio, NULL, gpio_num) + /* Construct a sensor_device_attribute structure for each register */ /* Input voltages */ @@ -293,7 +381,9 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2); LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); /* GPIO voltages */ -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC); +LTC4245_GPIO_VOLTAGE(in9_input, 0); +LTC4245_GPIO_VOLTAGE(in10_input, 1); +LTC4245_GPIO_VOLTAGE(in11_input, 2); /* Power Consumption (virtual) */ LTC4245_POWER(power1_input, LTC4245_12VSENSE); @@ -304,7 +394,7 @@ LTC4245_POWER(power4_input, LTC4245_VEESENSE); /* Finally, construct an array of pointers to members of the above objects, * as required for sysfs_create_group() */ -static struct attribute *ltc4245_attributes[] = { +static struct attribute *ltc4245_std_attributes[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, @@ -345,10 +435,77 @@ static struct attribute *ltc4245_attributes[] = { NULL, }; -static const struct attribute_group ltc4245_group = { - .attrs = ltc4245_attributes, +static struct attribute *ltc4245_gpio_attributes[] = { + &sensor_dev_attr_in10_input.dev_attr.attr, + &sensor_dev_attr_in11_input.dev_attr.attr, + NULL, +}; + +static const struct attribute_group ltc4245_std_group = { + .attrs = ltc4245_std_attributes, +}; + +static const struct attribute_group ltc4245_gpio_group = { + .attrs = ltc4245_gpio_attributes, }; +static int ltc4245_sysfs_create_groups(struct i2c_client *client) +{ + struct ltc4245_data *data = 
i2c_get_clientdata(client); + struct device *dev = &client->dev; + int ret; + + /* register the standard sysfs attributes */ + ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group); + if (ret) { + dev_err(dev, "unable to register standard attributes\n"); + return ret; + } + + /* if we're using the extra gpio support, register it's attributes */ + if (data->use_extra_gpios) { + ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group); + if (ret) { + dev_err(dev, "unable to register gpio attributes\n"); + sysfs_remove_group(&dev->kobj, &ltc4245_std_group); + return ret; + } + } + + return 0; +} + +static void ltc4245_sysfs_remove_groups(struct i2c_client *client) +{ + struct ltc4245_data *data = i2c_get_clientdata(client); + struct device *dev = &client->dev; + + if (data->use_extra_gpios) + sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group); + + sysfs_remove_group(&dev->kobj, &ltc4245_std_group); +} + +static bool ltc4245_use_extra_gpios(struct i2c_client *client) +{ + struct ltc4245_platform_data *pdata = dev_get_platdata(&client->dev); +#ifdef CONFIG_OF + struct device_node *np = client->dev.of_node; +#endif + + /* prefer platform data */ + if (pdata) + return pdata->use_extra_gpios; + +#ifdef CONFIG_OF + /* fallback on OF */ + if (of_find_property(np, "ltc4245,use-extra-gpios", NULL)) + return true; +#endif + + return false; +} + static int ltc4245_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -367,15 +524,16 @@ static int ltc4245_probe(struct i2c_client *client, i2c_set_clientdata(client, data); mutex_init(&data->update_lock); + data->use_extra_gpios = ltc4245_use_extra_gpios(client); /* Initialize the LTC4245 chip */ i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00); i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00); /* Register sysfs hooks */ - ret = sysfs_create_group(&client->dev.kobj, &ltc4245_group); + ret = ltc4245_sysfs_create_groups(client); if (ret) - goto out_sysfs_create_group; + goto out_sysfs_create_groups; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -386,8 +544,8 @@ static int ltc4245_probe(struct i2c_client *client, return 0; out_hwmon_device_register: - sysfs_remove_group(&client->dev.kobj, &ltc4245_group); -out_sysfs_create_group: + ltc4245_sysfs_remove_groups(client); +out_sysfs_create_groups: kfree(data); out_kzalloc: return ret; @@ -398,8 +556,7 @@ static int ltc4245_remove(struct i2c_client *client) struct ltc4245_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_group(&client->dev.kobj, &ltc4245_group); - + ltc4245_sysfs_remove_groups(client); kfree(data); return 0; diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index 4a64b85d4ec..68e69a49633 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) static int __init pc87360_device_add(unsigned short address) { - struct resource res = { - .name = "pc87360", - .flags = IORESOURCE_IO, - }; - int err, i; + struct resource res[3]; + int err, i, res_count; pdev = platform_device_alloc("pc87360", address); if (!pdev) { @@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address) goto exit; } + memset(res, 0, 3 * sizeof(struct resource)); + res_count = 0; for (i = 0; i < 3; i++) { if (!extra_isa[i]) continue; - res.start = extra_isa[i]; - res.end = extra_isa[i] + PC87360_EXTENT - 1; + res[res_count].start = extra_isa[i]; + res[res_count].end = extra_isa[i] +
PC87360_EXTENT - 1; + res[res_count].name = "pc87360", + res[res_count].flags = IORESOURCE_IO, - err = acpi_check_resource_conflict(&res); + err = acpi_check_resource_conflict(&res[res_count]); if (err) goto exit_device_put; - err = platform_device_add_resources(pdev, &res, 1); - if (err) { - printk(KERN_ERR "pc87360: Device resource[%d] " - "addition failed (%d)\n", i, err); - goto exit_device_put; - } + res_count++; + } + + err = platform_device_add_resources(pdev, res, res_count); + if (err) { + printk(KERN_ERR "pc87360: Device resources addition failed " + "(%d)\n", err); + goto exit_device_put; } err = platform_device_add(pdev); diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c index 3170b26d244..9ec4daaf6ca 100644 --- a/drivers/hwmon/pc87427.c +++ b/drivers/hwmon/pc87427.c @@ -1,7 +1,7 @@ /* * pc87427.c - hardware monitoring driver for the * National Semiconductor PC87427 Super-I/O chip - * Copyright (C) 2006 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2006, 2008, 2010 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -15,10 +15,11 @@ * Supports the following chips: * * Chip #vin #fan #pwm #temp devid - * PC87427 - 8 - - 0xF2 + * PC87427 - 8 4 6 0xF2 * * This driver assumes that no more than one chip is present. - * Only fan inputs are supported so far, although the chip can do much more. + * Only fans are fully supported so far. Temperatures are in read-only + * mode, and voltages aren't supported at all. */ #include <linux/module.h> @@ -57,6 +58,25 @@ struct pc87427_data { u16 fan[8]; /* register values */ u16 fan_min[8]; /* register values */ u8 fan_status[8]; /* register values */ + + u8 pwm_enabled; /* bit vector */ + u8 pwm_auto_ok; /* bit vector */ + u8 pwm_enable[4]; /* register values */ + u8 pwm[4]; /* register values */ + + u8 temp_enabled; /* bit vector */ + s16 temp[6]; /* register values */ + s8 temp_min[6]; /* register values */ + s8 temp_max[6]; /* register values */ + s8 temp_crit[6]; /* register values */ + u8 temp_status[6]; /* register values */ + u8 temp_type[6]; /* register values */ +}; + +struct pc87427_sio_data { + unsigned short address[2]; + u8 has_fanin; + u8 has_fanout; }; /* @@ -65,6 +85,13 @@ struct pc87427_data { #define SIOREG_LDSEL 0x07 /* Logical device select */ #define SIOREG_DEVID 0x20 /* Device ID */ +#define SIOREG_CF2 0x22 /* Configuration 2 */ +#define SIOREG_CF3 0x23 /* Configuration 3 */ +#define SIOREG_CF4 0x24 /* Configuration 4 */ +#define SIOREG_CF5 0x25 /* Configuration 5 */ +#define SIOREG_CFB 0x2B /* Configuration B */ +#define SIOREG_CFC 0x2C /* Configuration C */ +#define SIOREG_CFD 0x2D /* Configuration D */ #define SIOREG_ACT 0x30 /* Device activation */ #define SIOREG_MAP 0x50 /* I/O or memory mapping */ #define SIOREG_IOBASE 0x60 /* I/O base address */ @@ -102,6 +129,8 @@ static inline void superio_exit(int sioaddr) #define BANK_FM(nr) (nr) #define BANK_FT(nr) (0x08 + (nr)) #define BANK_FC(nr) (0x10 + (nr) * 2) +#define BANK_TM(nr) (nr) +#define BANK_VM(nr) (0x08 + (nr)) /* * I/O access functions @@ -179,6 +208,127 @@ static inline u16 fan_to_reg(unsigned long val) } /* + * PWM registers and conversions + */ + +#define PC87427_REG_PWM_ENABLE 0x10 +#define PC87427_REG_PWM_DUTY 0x12 + +#define PWM_ENABLE_MODE_MASK (7 << 4) +#define PWM_ENABLE_CTLEN (1 << 0) + +#define PWM_MODE_MANUAL (0 << 4) +#define PWM_MODE_AUTO (1 << 4) +#define PWM_MODE_OFF (2 << 4) +#define PWM_MODE_ON (7 << 4) 
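The PWM_MODE_* values above encode the pc87427's mode field (bits 6:4 of the PWM enable register) and map onto the usual hwmon pwmN_enable convention: 0 = full speed, 1 = manual duty cycle, 2 = automatic. The plain-C sketch below walks that mapping as a sanity check only; the driver's pwm_enable_from_reg()/pwm_enable_to_reg() helpers that follow implement the same translation in kernel context.

	#include <stdio.h>

	#define MODE_MASK	(7 << 4)
	#define MODE_MANUAL	(0 << 4)
	#define MODE_AUTO	(1 << 4)
	#define MODE_OFF	(2 << 4)
	#define MODE_ON		(7 << 4)

	/* Register mode field -> sysfs pwmN_enable value (sketch only). */
	static int sysfs_from_mode(unsigned char reg)
	{
		switch (reg & MODE_MASK) {
		case MODE_ON:
			return 0;	/* fan forced to full speed */
		case MODE_MANUAL:
		case MODE_OFF:
			return 1;	/* user-set duty cycle; OFF is duty 0 */
		case MODE_AUTO:
			return 2;	/* hardware-controlled */
		default:
			return -1;	/* reserved encodings */
		}
	}

	int main(void)
	{
		const unsigned char regs[] = { MODE_ON, MODE_MANUAL, MODE_OFF, MODE_AUTO };
		unsigned int i;

		for (i = 0; i < sizeof(regs); i++)
			printf("mode bits 0x%02x -> pwm_enable %d\n",
			       regs[i], sysfs_from_mode(regs[i]));
		return 0;
	}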
+ +/* Dedicated function to read all registers related to a given PWM output. + This saves us quite a few locks and bank selections. + Must be called with data->lock held. + nr is from 0 to 3 */ +static void pc87427_readall_pwm(struct pc87427_data *data, u8 nr) +{ + int iobase = data->address[LD_FAN]; + + outb(BANK_FC(nr), iobase + PC87427_REG_BANK); + data->pwm_enable[nr] = inb(iobase + PC87427_REG_PWM_ENABLE); + data->pwm[nr] = inb(iobase + PC87427_REG_PWM_DUTY); +} + +static inline int pwm_enable_from_reg(u8 reg) +{ + switch (reg & PWM_ENABLE_MODE_MASK) { + case PWM_MODE_ON: + return 0; + case PWM_MODE_MANUAL: + case PWM_MODE_OFF: + return 1; + case PWM_MODE_AUTO: + return 2; + default: + return -EPROTO; + } +} + +static inline u8 pwm_enable_to_reg(unsigned long val, u8 pwmval) +{ + switch (val) { + default: + return PWM_MODE_ON; + case 1: + return pwmval ? PWM_MODE_MANUAL : PWM_MODE_OFF; + case 2: + return PWM_MODE_AUTO; + } +} + +/* + * Temperature registers and conversions + */ + +#define PC87427_REG_TEMP_STATUS 0x10 +#define PC87427_REG_TEMP 0x14 +#define PC87427_REG_TEMP_MAX 0x18 +#define PC87427_REG_TEMP_MIN 0x19 +#define PC87427_REG_TEMP_CRIT 0x1a +#define PC87427_REG_TEMP_TYPE 0x1d + +#define TEMP_STATUS_CHANEN (1 << 0) +#define TEMP_STATUS_LOWFLG (1 << 1) +#define TEMP_STATUS_HIGHFLG (1 << 2) +#define TEMP_STATUS_CRITFLG (1 << 3) +#define TEMP_STATUS_SENSERR (1 << 5) +#define TEMP_TYPE_MASK (3 << 5) + +#define TEMP_TYPE_THERMISTOR (1 << 5) +#define TEMP_TYPE_REMOTE_DIODE (2 << 5) +#define TEMP_TYPE_LOCAL_DIODE (3 << 5) + +/* Dedicated function to read all registers related to a given temperature + input. This saves us quite a few locks and bank selections. + Must be called with data->lock held. + nr is from 0 to 5 */ +static void pc87427_readall_temp(struct pc87427_data *data, u8 nr) +{ + int iobase = data->address[LD_TEMP]; + + outb(BANK_TM(nr), iobase + PC87427_REG_BANK); + data->temp[nr] = le16_to_cpu(inw(iobase + PC87427_REG_TEMP)); + data->temp_max[nr] = inb(iobase + PC87427_REG_TEMP_MAX); + data->temp_min[nr] = inb(iobase + PC87427_REG_TEMP_MIN); + data->temp_crit[nr] = inb(iobase + PC87427_REG_TEMP_CRIT); + data->temp_type[nr] = inb(iobase + PC87427_REG_TEMP_TYPE); + data->temp_status[nr] = inb(iobase + PC87427_REG_TEMP_STATUS); + /* Clear fan alarm bits */ + outb(data->temp_status[nr], iobase + PC87427_REG_TEMP_STATUS); +} + +static inline unsigned int temp_type_from_reg(u8 reg) +{ + switch (reg & TEMP_TYPE_MASK) { + case TEMP_TYPE_THERMISTOR: + return 4; + case TEMP_TYPE_REMOTE_DIODE: + case TEMP_TYPE_LOCAL_DIODE: + return 3; + default: + return 0; + } +} + +/* We assume 8-bit thermal sensors; 9-bit thermal sensors are possible + too, but I have no idea how to figure out when they are used. 
*/ +static inline long temp_from_reg(s16 reg) +{ + return reg * 1000 / 256; +} + +static inline long temp_from_reg8(s8 reg) +{ + return reg * 1000; +} + +/* * Data interface */ @@ -198,6 +348,21 @@ static struct pc87427_data *pc87427_update_device(struct device *dev) continue; pc87427_readall_fan(data, i); } + + /* PWM outputs */ + for (i = 0; i < 4; i++) { + if (!(data->pwm_enabled & (1 << i))) + continue; + pc87427_readall_pwm(data, i); + } + + /* Temperature channels */ + for (i = 0; i < 6; i++) { + if (!(data->temp_enabled & (1 << i))) + continue; + pc87427_readall_temp(data, i); + } + data->last_updated = jiffies; done: @@ -208,9 +373,8 @@ done: static ssize_t show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct pc87427_data *data = pc87427_update_device(dev); - int nr = attr->index; + int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%lu\n", fan_from_reg(data->fan[nr])); } @@ -218,9 +382,8 @@ static ssize_t show_fan_input(struct device *dev, struct device_attribute static ssize_t show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct pc87427_data *data = pc87427_update_device(dev); - int nr = attr->index; + int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%lu\n", fan_from_reg(data->fan_min[nr])); } @@ -228,9 +391,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct pc87427_data *data = pc87427_update_device(dev); - int nr = attr->index; + int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->fan_status[nr] & FAN_STATUS_LOSPD)); @@ -239,9 +401,8 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute static ssize_t show_fan_fault(struct device *dev, struct device_attribute *devattr, char *buf) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct pc87427_data *data = pc87427_update_device(dev); - int nr = attr->index; + int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->fan_status[nr] & FAN_STATUS_STALL)); @@ -251,11 +412,13 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct pc87427_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - int nr = attr->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int nr = to_sensor_dev_attr(devattr)->index; + unsigned long val; int iobase = data->address[LD_FAN]; + if (strict_strtoul(buf, 10, &val) < 0) + return -EINVAL; + mutex_lock(&data->lock); outb(BANK_FM(nr), iobase + PC87427_REG_BANK); /* The low speed limit registers are read-only while monitoring @@ -377,6 +540,390 @@ static const struct attribute_group pc87427_group_fan[8] = { { .attrs = pc87427_attributes_fan[7] }, }; +/* Must be called with data->lock held and pc87427_readall_pwm() freshly + called */ +static void update_pwm_enable(struct pc87427_data *data, int nr, u8 mode) +{ + int iobase = data->address[LD_FAN]; + data->pwm_enable[nr] &= ~PWM_ENABLE_MODE_MASK; + data->pwm_enable[nr] |= mode; + outb(data->pwm_enable[nr], iobase + PC87427_REG_PWM_ENABLE); +} + +static ssize_t show_pwm_enable(struct device *dev, 
struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + int pwm_enable; + + pwm_enable = pwm_enable_from_reg(data->pwm_enable[nr]); + if (pwm_enable < 0) + return pwm_enable; + return sprintf(buf, "%d\n", pwm_enable); +} + +static ssize_t set_pwm_enable(struct device *dev, struct device_attribute + *devattr, const char *buf, size_t count) +{ + struct pc87427_data *data = dev_get_drvdata(dev); + int nr = to_sensor_dev_attr(devattr)->index; + unsigned long val; + + if (strict_strtoul(buf, 10, &val) < 0 || val > 2) + return -EINVAL; + /* Can't go to automatic mode if it isn't configured */ + if (val == 2 && !(data->pwm_auto_ok & (1 << nr))) + return -EINVAL; + + mutex_lock(&data->lock); + pc87427_readall_pwm(data, nr); + update_pwm_enable(data, nr, pwm_enable_to_reg(val, data->pwm[nr])); + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t show_pwm(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%d\n", (int)data->pwm[nr]); +} + +static ssize_t set_pwm(struct device *dev, struct device_attribute + *devattr, const char *buf, size_t count) +{ + struct pc87427_data *data = dev_get_drvdata(dev); + int nr = to_sensor_dev_attr(devattr)->index; + unsigned long val; + int iobase = data->address[LD_FAN]; + u8 mode; + + if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff) + return -EINVAL; + + mutex_lock(&data->lock); + pc87427_readall_pwm(data, nr); + mode = data->pwm_enable[nr] & PWM_ENABLE_MODE_MASK; + if (mode != PWM_MODE_MANUAL && mode != PWM_MODE_OFF) { + dev_notice(dev, "Can't set PWM%d duty cycle while not in " + "manual mode\n", nr + 1); + mutex_unlock(&data->lock); + return -EPERM; + } + + /* We may have to change the mode */ + if (mode == PWM_MODE_MANUAL && val == 0) { + /* Transition from Manual to Off */ + update_pwm_enable(data, nr, PWM_MODE_OFF); + mode = PWM_MODE_OFF; + dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1, + "manual", "off"); + } else if (mode == PWM_MODE_OFF && val != 0) { + /* Transition from Off to Manual */ + update_pwm_enable(data, nr, PWM_MODE_MANUAL); + mode = PWM_MODE_MANUAL; + dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1, + "off", "manual"); + } + + data->pwm[nr] = val; + if (mode == PWM_MODE_MANUAL) + outb(val, iobase + PC87427_REG_PWM_DUTY); + mutex_unlock(&data->lock); + + return count; +} + +static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, + show_pwm_enable, set_pwm_enable, 0); +static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, + show_pwm_enable, set_pwm_enable, 1); +static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, + show_pwm_enable, set_pwm_enable, 2); +static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, + show_pwm_enable, set_pwm_enable, 3); + +static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0); +static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1); +static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2); +static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3); + +static struct attribute *pc87427_attributes_pwm[4][3] = { + { + &sensor_dev_attr_pwm1_enable.dev_attr.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_pwm2_enable.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_pwm3_enable.dev_attr.attr, + 
&sensor_dev_attr_pwm3.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_pwm4_enable.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + NULL + } +}; + +static const struct attribute_group pc87427_group_pwm[4] = { + { .attrs = pc87427_attributes_pwm[0] }, + { .attrs = pc87427_attributes_pwm[1] }, + { .attrs = pc87427_attributes_pwm[2] }, + { .attrs = pc87427_attributes_pwm[3] }, +}; + +static ssize_t show_temp_input(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%ld\n", temp_from_reg(data->temp[nr])); +} + +static ssize_t show_temp_min(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_min[nr])); +} + +static ssize_t show_temp_max(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_max[nr])); +} + +static ssize_t show_temp_crit(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_crit[nr])); +} + +static ssize_t show_temp_type(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%u\n", temp_type_from_reg(data->temp_type[nr])); +} + +static ssize_t show_temp_min_alarm(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%d\n", !!(data->temp_status[nr] + & TEMP_STATUS_LOWFLG)); +} + +static ssize_t show_temp_max_alarm(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%d\n", !!(data->temp_status[nr] + & TEMP_STATUS_HIGHFLG)); +} + +static ssize_t show_temp_crit_alarm(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%d\n", !!(data->temp_status[nr] + & TEMP_STATUS_CRITFLG)); +} + +static ssize_t show_temp_fault(struct device *dev, struct device_attribute + *devattr, char *buf) +{ + struct pc87427_data *data = pc87427_update_device(dev); + int nr = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%d\n", !!(data->temp_status[nr] + & TEMP_STATUS_SENSERR)); +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, NULL, 0); +static 
SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_min, S_IRUGO, show_temp_min, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_type, S_IRUGO, show_temp_type, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_type, S_IRUGO, show_temp_type, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_type, S_IRUGO, show_temp_type, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_min_alarm, S_IRUGO, + show_temp_min_alarm, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, + show_temp_max_alarm, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, + show_temp_crit_alarm, NULL, 5); + +static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3); 
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5); + +static struct attribute *pc87427_attributes_temp[6][10] = { + { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_type.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_fault.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_crit.dev_attr.attr, + &sensor_dev_attr_temp2_type.dev_attr.attr, + &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_fault.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp3_crit.dev_attr.attr, + &sensor_dev_attr_temp3_type.dev_attr.attr, + &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_fault.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_min.dev_attr.attr, + &sensor_dev_attr_temp4_max.dev_attr.attr, + &sensor_dev_attr_temp4_crit.dev_attr.attr, + &sensor_dev_attr_temp4_type.dev_attr.attr, + &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_fault.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp5_min.dev_attr.attr, + &sensor_dev_attr_temp5_max.dev_attr.attr, + &sensor_dev_attr_temp5_crit.dev_attr.attr, + &sensor_dev_attr_temp5_type.dev_attr.attr, + &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_fault.dev_attr.attr, + NULL + }, { + &sensor_dev_attr_temp6_input.dev_attr.attr, + &sensor_dev_attr_temp6_min.dev_attr.attr, + &sensor_dev_attr_temp6_max.dev_attr.attr, + &sensor_dev_attr_temp6_crit.dev_attr.attr, + &sensor_dev_attr_temp6_type.dev_attr.attr, + &sensor_dev_attr_temp6_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp6_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp6_fault.dev_attr.attr, + NULL + } +}; + +static const struct attribute_group pc87427_group_temp[6] = { + { .attrs = pc87427_attributes_temp[0] }, + { .attrs = pc87427_attributes_temp[1] }, + { .attrs = pc87427_attributes_temp[2] }, + { .attrs = pc87427_attributes_temp[3] }, + { .attrs = pc87427_attributes_temp[4] }, + { .attrs = pc87427_attributes_temp[5] }, +}; + static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -391,8 +938,49 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); * Device detection, attach and detach */ +static void pc87427_release_regions(struct platform_device *pdev, int count) +{ + struct resource *res; + int i; + + for (i = 0; i < count; i++) { + res = platform_get_resource(pdev, 
IORESOURCE_IO, i); + release_region(res->start, resource_size(res)); + } +} + +static int __devinit pc87427_request_regions(struct platform_device *pdev, + int count) +{ + struct resource *res; + int i, err = 0; + + for (i = 0; i < count; i++) { + res = platform_get_resource(pdev, IORESOURCE_IO, i); + if (!res) { + err = -ENOENT; + dev_err(&pdev->dev, "Missing resource #%d\n", i); + break; + } + if (!request_region(res->start, resource_size(res), DRVNAME)) { + err = -EBUSY; + dev_err(&pdev->dev, + "Failed to request region 0x%lx-0x%lx\n", + (unsigned long)res->start, + (unsigned long)res->end); + break; + } + } + + if (err && i) + pc87427_release_regions(pdev, i); + + return err; +} + static void __devinit pc87427_init_device(struct device *dev) { + struct pc87427_sio_data *sio_data = dev->platform_data; struct pc87427_data *data = dev_get_drvdata(dev); int i; u8 reg; @@ -400,10 +988,12 @@ static void __devinit pc87427_init_device(struct device *dev) /* The FMC module should be ready */ reg = pc87427_read8(data, LD_FAN, PC87427_REG_BANK); if (!(reg & 0x80)) - dev_warn(dev, "FMC module not ready!\n"); + dev_warn(dev, "%s module not ready!\n", "FMC"); /* Check which fans are enabled */ for (i = 0; i < 8; i++) { + if (!(sio_data->has_fanin & (1 << i))) /* Not wired */ + continue; reg = pc87427_read8_bank(data, LD_FAN, BANK_FM(i), PC87427_REG_FAN_STATUS); if (reg & FAN_STATUS_MONEN) @@ -411,37 +1001,93 @@ static void __devinit pc87427_init_device(struct device *dev) } if (!data->fan_enabled) { - dev_dbg(dev, "Enabling all fan inputs\n"); - for (i = 0; i < 8; i++) + dev_dbg(dev, "Enabling monitoring of all fans\n"); + for (i = 0; i < 8; i++) { + if (!(sio_data->has_fanin & (1 << i))) /* Not wired */ + continue; pc87427_write8_bank(data, LD_FAN, BANK_FM(i), PC87427_REG_FAN_STATUS, FAN_STATUS_MONEN); - data->fan_enabled = 0xff; + } + data->fan_enabled = sio_data->has_fanin; + } + + /* Check which PWM outputs are enabled */ + for (i = 0; i < 4; i++) { + if (!(sio_data->has_fanout & (1 << i))) /* Not wired */ + continue; + reg = pc87427_read8_bank(data, LD_FAN, BANK_FC(i), + PC87427_REG_PWM_ENABLE); + if (reg & PWM_ENABLE_CTLEN) + data->pwm_enabled |= (1 << i); + + /* We don't expose an interface to reconfigure the automatic + fan control mode, so only allow to return to this mode if + it was originally set. 
*/ + if ((reg & PWM_ENABLE_MODE_MASK) == PWM_MODE_AUTO) { + dev_dbg(dev, "PWM%d is in automatic control mode\n", + i + 1); + data->pwm_auto_ok |= (1 << i); + } + } + + /* The HMC module should be ready */ + reg = pc87427_read8(data, LD_TEMP, PC87427_REG_BANK); + if (!(reg & 0x80)) + dev_warn(dev, "%s module not ready!\n", "HMC"); + + /* Check which temperature channels are enabled */ + for (i = 0; i < 6; i++) { + reg = pc87427_read8_bank(data, LD_TEMP, BANK_TM(i), + PC87427_REG_TEMP_STATUS); + if (reg & TEMP_STATUS_CHANEN) + data->temp_enabled |= (1 << i); + } +} + +static void pc87427_remove_files(struct device *dev) +{ + struct pc87427_data *data = dev_get_drvdata(dev); + int i; + + device_remove_file(dev, &dev_attr_name); + for (i = 0; i < 8; i++) { + if (!(data->fan_enabled & (1 << i))) + continue; + sysfs_remove_group(&dev->kobj, &pc87427_group_fan[i]); + } + for (i = 0; i < 4; i++) { + if (!(data->pwm_enabled & (1 << i))) + continue; + sysfs_remove_group(&dev->kobj, &pc87427_group_pwm[i]); + } + for (i = 0; i < 6; i++) { + if (!(data->temp_enabled & (1 << i))) + continue; + sysfs_remove_group(&dev->kobj, &pc87427_group_temp[i]); } } static int __devinit pc87427_probe(struct platform_device *pdev) { + struct pc87427_sio_data *sio_data = pdev->dev.platform_data; struct pc87427_data *data; - struct resource *res; - int i, err; + int i, err, res_count; - if (!(data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL))) { + data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL); + if (!data) { err = -ENOMEM; printk(KERN_ERR DRVNAME ": Out of memory\n"); goto exit; } - /* This will need to be revisited when we add support for - temperature and voltage monitoring. */ - res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (!request_region(res->start, resource_size(res), DRVNAME)) { - err = -EBUSY; - dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n", - (unsigned long)res->start, (unsigned long)res->end); + data->address[0] = sio_data->address[0]; + data->address[1] = sio_data->address[1]; + res_count = (data->address[0] != 0) + (data->address[1] != 0); + + err = pc87427_request_regions(pdev, res_count); + if (err) goto exit_kfree; - } - data->address[0] = res->start; mutex_init(&data->lock); data->name = "pc87427"; @@ -449,13 +1095,31 @@ static int __devinit pc87427_probe(struct platform_device *pdev) pc87427_init_device(&pdev->dev); /* Register sysfs hooks */ - if ((err = device_create_file(&pdev->dev, &dev_attr_name))) + err = device_create_file(&pdev->dev, &dev_attr_name); + if (err) goto exit_release_region; for (i = 0; i < 8; i++) { if (!(data->fan_enabled & (1 << i))) continue; - if ((err = sysfs_create_group(&pdev->dev.kobj, - &pc87427_group_fan[i]))) + err = sysfs_create_group(&pdev->dev.kobj, + &pc87427_group_fan[i]); + if (err) + goto exit_remove_files; + } + for (i = 0; i < 4; i++) { + if (!(data->pwm_enabled & (1 << i))) + continue; + err = sysfs_create_group(&pdev->dev.kobj, + &pc87427_group_pwm[i]); + if (err) + goto exit_remove_files; + } + for (i = 0; i < 6; i++) { + if (!(data->temp_enabled & (1 << i))) + continue; + err = sysfs_create_group(&pdev->dev.kobj, + &pc87427_group_temp[i]); + if (err) goto exit_remove_files; } @@ -469,13 +1133,9 @@ static int __devinit pc87427_probe(struct platform_device *pdev) return 0; exit_remove_files: - for (i = 0; i < 8; i++) { - if (!(data->fan_enabled & (1 << i))) - continue; - sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]); - } + pc87427_remove_files(&pdev->dev); exit_release_region: - release_region(res->start, 
resource_size(res)); + pc87427_release_regions(pdev, res_count); exit_kfree: platform_set_drvdata(pdev, NULL); kfree(data); @@ -486,21 +1146,16 @@ exit: static int __devexit pc87427_remove(struct platform_device *pdev) { struct pc87427_data *data = platform_get_drvdata(pdev); - struct resource *res; - int i; + int res_count; + + res_count = (data->address[0] != 0) + (data->address[1] != 0); hwmon_device_unregister(data->hwmon_dev); - device_remove_file(&pdev->dev, &dev_attr_name); - for (i = 0; i < 8; i++) { - if (!(data->fan_enabled & (1 << i))) - continue; - sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]); - } + pc87427_remove_files(&pdev->dev); platform_set_drvdata(pdev, NULL); kfree(data); - res = platform_get_resource(pdev, IORESOURCE_IO, 0); - release_region(res->start, resource_size(res)); + pc87427_release_regions(pdev, res_count); return 0; } @@ -515,34 +1170,50 @@ static struct platform_driver pc87427_driver = { .remove = __devexit_p(pc87427_remove), }; -static int __init pc87427_device_add(unsigned short address) +static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data) { - struct resource res = { - .start = address, - .end = address + REGION_LENGTH - 1, - .name = logdev_str[0], - .flags = IORESOURCE_IO, + struct resource res[2] = { + { .flags = IORESOURCE_IO }, + { .flags = IORESOURCE_IO }, }; - int err; + int err, i, res_count; - err = acpi_check_resource_conflict(&res); - if (err) - goto exit; + res_count = 0; + for (i = 0; i < 2; i++) { + if (!sio_data->address[i]) + continue; + res[res_count].start = sio_data->address[i]; + res[res_count].end = sio_data->address[i] + REGION_LENGTH - 1; + res[res_count].name = logdev_str[i]; - pdev = platform_device_alloc(DRVNAME, address); + err = acpi_check_resource_conflict(&res[res_count]); + if (err) + goto exit; + + res_count++; + } + + pdev = platform_device_alloc(DRVNAME, res[0].start); if (!pdev) { err = -ENOMEM; printk(KERN_ERR DRVNAME ": Device allocation failed\n"); goto exit; } - err = platform_device_add_resources(pdev, &res, 1); + err = platform_device_add_resources(pdev, res, res_count); if (err) { printk(KERN_ERR DRVNAME ": Device resource addition failed " "(%d)\n", err); goto exit_device_put; } + err = platform_device_add_data(pdev, sio_data, + sizeof(struct pc87427_sio_data)); + if (err) { + printk(KERN_ERR DRVNAME ": Platform data allocation failed\n"); + goto exit_device_put; + } + err = platform_device_add(pdev); if (err) { printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", @@ -558,9 +1229,10 @@ exit: return err; } -static int __init pc87427_find(int sioaddr, unsigned short *address) +static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data) { u16 val; + u8 cfg, cfg_b; int i, err = 0; /* Identify device */ @@ -571,7 +1243,7 @@ static int __init pc87427_find(int sioaddr, unsigned short *address) } for (i = 0; i < 2; i++) { - address[i] = 0; + sio_data->address[i] = 0; /* Select logical device */ superio_outb(sioaddr, SIOREG_LDSEL, logdev[i]); @@ -596,9 +1268,58 @@ static int __init pc87427_find(int sioaddr, unsigned short *address) "for logical device 0x%02x\n", logdev[i]); continue; } - address[i] = val; + sio_data->address[i] = val; } + /* No point in loading the driver if everything is disabled */ + if (!sio_data->address[0] && !sio_data->address[1]) { + err = -ENODEV; + goto exit; + } + + /* Check which fan inputs are wired */ + sio_data->has_fanin = (1 << 2) | (1 << 3); /* FANIN2, FANIN3 */ + + cfg = superio_inb(sioaddr, SIOREG_CF2); + if (!(cfg & (1 << 
3))) + sio_data->has_fanin |= (1 << 0); /* FANIN0 */ + if (!(cfg & (1 << 2))) + sio_data->has_fanin |= (1 << 4); /* FANIN4 */ + + cfg = superio_inb(sioaddr, SIOREG_CFD); + if (!(cfg & (1 << 0))) + sio_data->has_fanin |= (1 << 1); /* FANIN1 */ + + cfg = superio_inb(sioaddr, SIOREG_CF4); + if (!(cfg & (1 << 0))) + sio_data->has_fanin |= (1 << 7); /* FANIN7 */ + cfg_b = superio_inb(sioaddr, SIOREG_CFB); + if (!(cfg & (1 << 1)) && (cfg_b & (1 << 3))) + sio_data->has_fanin |= (1 << 5); /* FANIN5 */ + cfg = superio_inb(sioaddr, SIOREG_CF3); + if ((cfg & (1 << 3)) && !(cfg_b & (1 << 5))) + sio_data->has_fanin |= (1 << 6); /* FANIN6 */ + + /* Check which fan outputs are wired */ + sio_data->has_fanout = (1 << 0); /* FANOUT0 */ + if (cfg_b & (1 << 0)) + sio_data->has_fanout |= (1 << 3); /* FANOUT3 */ + + cfg = superio_inb(sioaddr, SIOREG_CFC); + if (!(cfg & (1 << 4))) { + if (cfg_b & (1 << 1)) + sio_data->has_fanout |= (1 << 1); /* FANOUT1 */ + if (cfg_b & (1 << 2)) + sio_data->has_fanout |= (1 << 2); /* FANOUT2 */ + } + + /* FANOUT1 and FANOUT2 can each be routed to 2 different pins */ + cfg = superio_inb(sioaddr, SIOREG_CF5); + if (cfg & (1 << 6)) + sio_data->has_fanout |= (1 << 1); /* FANOUT1 */ + if (cfg & (1 << 5)) + sio_data->has_fanout |= (1 << 2); /* FANOUT2 */ + exit: superio_exit(sioaddr); return err; @@ -607,15 +1328,10 @@ exit: static int __init pc87427_init(void) { int err; - unsigned short address[2]; - - if (pc87427_find(0x2e, address) - && pc87427_find(0x4e, address)) - return -ENODEV; + struct pc87427_sio_data sio_data; - /* For now the driver only handles fans so we only care about the - first address. */ - if (!address[0]) + if (pc87427_find(0x2e, &sio_data) + && pc87427_find(0x4e, &sio_data)) return -ENODEV; err = platform_driver_register(&pc87427_driver); @@ -623,7 +1339,7 @@ static int __init pc87427_init(void) goto exit; /* Sets global pdev as a side effect */ - err = pc87427_device_add(address[0]); + err = pc87427_device_add(&sio_data); if (err) goto exit_driver; diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 7442cf75485..ffb793af680 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -39,7 +39,7 @@ #define DRVNAME "via_cputemp" -enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW; +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME }; /* * Functions declaration diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 0dcaba9b718..e96e69dd36f 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -39,6 +39,7 @@ w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3 w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3 w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 + w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3 */ #include <linux/module.h> @@ -55,7 +56,7 @@ #include <linux/io.h> #include "lm75.h" -enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg }; +enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ static const char * w83627ehf_device_names[] = { @@ -63,6 +64,7 @@ static const char * w83627ehf_device_names[] = { "w83627dhg", "w83627dhg", "w83667hg", + "w83667hg", }; static unsigned short force_id; @@ -91,6 +93,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID"); #define SIO_W83627DHG_ID 0xa020 #define SIO_W83627DHG_P_ID 0xb070 #define SIO_W83667HG_ID 0xa510 +#define SIO_W83667HG_B_ID 0xb350 #define SIO_ID_MASK 0xFFF0 static inline void @@ -201,8 +204,14 @@ static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 }; static const 
u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 }; static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 }; -static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 }; -static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a }; + +static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[] + = { 0xff, 0x67, 0xff, 0x69 }; +static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[] + = { 0xff, 0x68, 0xff, 0x6a }; + +static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b }; +static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c }; /* * Conversions @@ -277,6 +286,11 @@ struct w83627ehf_data { struct device *hwmon_dev; struct mutex lock; + const u8 *REG_FAN_START_OUTPUT; + const u8 *REG_FAN_STOP_OUTPUT; + const u8 *REG_FAN_MAX_OUTPUT; + const u8 *REG_FAN_STEP_OUTPUT; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -524,7 +538,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) } } - for (i = 0; i < 4; i++) { + for (i = 0; i < data->pwm_num; i++) { + if (!(data->has_fan & (1 << i))) + continue; + /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */ if (i != 1) { pwmcfg = w83627ehf_read_value(data, @@ -546,6 +563,17 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, W83627EHF_REG_FAN_STOP_TIME[i]); + + if (data->REG_FAN_MAX_OUTPUT[i] != 0xff) + data->fan_max_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_MAX_OUTPUT[i]); + + if (data->REG_FAN_STEP_OUTPUT[i] != 0xff) + data->fan_step_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_STEP_OUTPUT[i]); + data->target_temp[i] = w83627ehf_read_value(data, W83627EHF_REG_TARGET[i]) & @@ -1126,7 +1154,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \ + w83627ehf_write_value(data, data->REG_##REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } @@ -1206,12 +1234,26 @@ static struct sensor_device_attribute sda_sf3_arrays[] = { store_fan_stop_output, 1), SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, store_fan_stop_output, 2), +}; - /* pwm1 and pwm3 don't support max and step settings */ + +/* + * pwm1 and pwm3 don't support max and step settings on all chips. + * Need to check support while generating/removing attribute files. 
+ */ +static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { + SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 0), + SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 0), SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, store_fan_max_output, 1), SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, store_fan_step_output, 1), + SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 2), + SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 2), }; static ssize_t @@ -1235,6 +1277,12 @@ static void w83627ehf_device_remove_files(struct device *dev) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) + device_remove_file(dev, &attr->dev_attr); + } for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); for (i = 0; i < data->in_num; i++) { @@ -1343,22 +1391,37 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; /* 667HG has 3 pwms */ - data->pwm_num = (sio_data->kind == w83667hg) ? 3 : 4; + data->pwm_num = (sio_data->kind == w83667hg + || sio_data->kind == w83667hg_b) ? 3 : 4; /* Check temp3 configuration bit for 667HG */ - if (sio_data->kind == w83667hg) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { data->temp3_disable = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[1]) & 0x01; data->in6_skip = !data->temp3_disable; } + data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; + if (sio_data->kind == w83667hg_b) { + data->REG_FAN_MAX_OUTPUT = + W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; + data->REG_FAN_STEP_OUTPUT = + W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; + } else { + data->REG_FAN_MAX_OUTPUT = + W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; + data->REG_FAN_STEP_OUTPUT = + W83627EHF_REG_FAN_STEP_OUTPUT_COMMON; + } + /* Initialize the chip */ w83627ehf_init_device(data); data->vrm = vid_which_vrm(); superio_enter(sio_data->sioreg); /* Read VID value */ - if (sio_data->kind == w83667hg) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { /* W83667HG has different pins for VID input and output, so we can get the VID input values directly at logical device D 0xe3. 
*/ @@ -1409,7 +1472,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == w83667hg) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; } else { @@ -1440,6 +1503,15 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) &sda_sf3_arrays[i].dev_attr))) goto exit_remove; + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { + err = device_create_file(dev, &attr->dev_attr); + if (err) + goto exit_remove; + } + } /* if fan4 is enabled create the sf3 files for it */ if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { @@ -1556,6 +1628,7 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char __initdata sio_name_W83627DHG[] = "W83627DHG"; static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P"; static const char __initdata sio_name_W83667HG[] = "W83667HG"; + static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B"; u16 val; const char *sio_name; @@ -1588,6 +1661,10 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg; sio_name = sio_name_W83667HG; break; + case SIO_W83667HG_B_ID: + sio_data->kind = w83667hg_b; + sio_name = sio_name_W83667HG_B; + break; default: if (val != 0xffff) pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n", diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 15a9702e294..6539ac2907e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -448,6 +448,13 @@ config I2C_NOMADIK If you say yes to this option, support will be included for the I2C interface from ST-Ericsson's Nomadik and Ux500 architectures. +config I2C_NUC900 + tristate "NUC900 I2C Driver" + depends on ARCH_W90X900 + help + Say Y here to include support for I2C controller in the + Winbond/Nuvoton NUC900 based System-on-Chip devices. + config I2C_OCORES tristate "OpenCores I2C Controller" depends on EXPERIMENTAL @@ -496,8 +503,8 @@ config I2C_PMCMSP will be called i2c-pmcmsp. 
config I2C_PNX - tristate "I2C bus support for Philips PNX targets" - depends on ARCH_PNX4008 + tristate "I2C bus support for Philips PNX and NXP LPC targets" + depends on ARCH_PNX4008 || ARCH_LPC32XX help This driver supports the Philips IP3204 I2C IP block master and/or slave controller diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 936880bd1dc..c3ef49230cb 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o obj-$(CONFIG_I2C_MPC) += i2c-mpc.o obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o +obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o obj-$(CONFIG_I2C_OMAP) += i2c-omap.o obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 4523364e672..2222c87876b 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -36,14 +36,16 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/cpufreq.h> +#include <linux/gpio.h> #include <mach/hardware.h> - #include <mach/i2c.h> /* ----- global defines ----------------------------------------------- */ #define DAVINCI_I2C_TIMEOUT (1*HZ) +#define DAVINCI_I2C_MAX_TRIES 2 #define I2C_DAVINCI_INTR_ALL (DAVINCI_I2C_IMR_AAS | \ DAVINCI_I2C_IMR_SCD | \ DAVINCI_I2C_IMR_ARDY | \ @@ -72,37 +74,29 @@ #define DAVINCI_I2C_IVR_NACK 0x02 #define DAVINCI_I2C_IVR_AL 0x01 -#define DAVINCI_I2C_STR_BB (1 << 12) -#define DAVINCI_I2C_STR_RSFULL (1 << 11) -#define DAVINCI_I2C_STR_SCD (1 << 5) -#define DAVINCI_I2C_STR_ARDY (1 << 2) -#define DAVINCI_I2C_STR_NACK (1 << 1) -#define DAVINCI_I2C_STR_AL (1 << 0) - -#define DAVINCI_I2C_MDR_NACK (1 << 15) -#define DAVINCI_I2C_MDR_STT (1 << 13) -#define DAVINCI_I2C_MDR_STP (1 << 11) -#define DAVINCI_I2C_MDR_MST (1 << 10) -#define DAVINCI_I2C_MDR_TRX (1 << 9) -#define DAVINCI_I2C_MDR_XA (1 << 8) -#define DAVINCI_I2C_MDR_RM (1 << 7) -#define DAVINCI_I2C_MDR_IRS (1 << 5) - -#define DAVINCI_I2C_IMR_AAS (1 << 6) -#define DAVINCI_I2C_IMR_SCD (1 << 5) -#define DAVINCI_I2C_IMR_XRDY (1 << 4) -#define DAVINCI_I2C_IMR_RRDY (1 << 3) -#define DAVINCI_I2C_IMR_ARDY (1 << 2) -#define DAVINCI_I2C_IMR_NACK (1 << 1) -#define DAVINCI_I2C_IMR_AL (1 << 0) - -#define MOD_REG_BIT(val, mask, set) do { \ - if (set) { \ - val |= mask; \ - } else { \ - val &= ~mask; \ - } \ -} while (0) +#define DAVINCI_I2C_STR_BB BIT(12) +#define DAVINCI_I2C_STR_RSFULL BIT(11) +#define DAVINCI_I2C_STR_SCD BIT(5) +#define DAVINCI_I2C_STR_ARDY BIT(2) +#define DAVINCI_I2C_STR_NACK BIT(1) +#define DAVINCI_I2C_STR_AL BIT(0) + +#define DAVINCI_I2C_MDR_NACK BIT(15) +#define DAVINCI_I2C_MDR_STT BIT(13) +#define DAVINCI_I2C_MDR_STP BIT(11) +#define DAVINCI_I2C_MDR_MST BIT(10) +#define DAVINCI_I2C_MDR_TRX BIT(9) +#define DAVINCI_I2C_MDR_XA BIT(8) +#define DAVINCI_I2C_MDR_RM BIT(7) +#define DAVINCI_I2C_MDR_IRS BIT(5) + +#define DAVINCI_I2C_IMR_AAS BIT(6) +#define DAVINCI_I2C_IMR_SCD BIT(5) +#define DAVINCI_I2C_IMR_XRDY BIT(4) +#define DAVINCI_I2C_IMR_RRDY BIT(3) +#define DAVINCI_I2C_IMR_ARDY BIT(2) +#define DAVINCI_I2C_IMR_NACK BIT(1) +#define DAVINCI_I2C_IMR_AL BIT(0) struct davinci_i2c_dev { struct device *dev; @@ -113,8 +107,13 @@ struct davinci_i2c_dev { u8 *buf; size_t buf_len; int irq; + int stop; u8 terminate; struct i2c_adapter adapter; +#ifdef CONFIG_CPU_FREQ + struct completion xfr_complete; + struct notifier_block freq_transition; +#endif }; /* 
default platform data to use if not supplied in the platform_device */ @@ -134,12 +133,59 @@ static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg) return __raw_readw(i2c_dev->base + reg); } -/* - * This functions configures I2C and brings I2C out of reset. - * This function is called during I2C init function. This function - * also gets called if I2C encounters any errors. +/* Generate a pulse on the i2c clock pin. */ +static void generic_i2c_clock_pulse(unsigned int scl_pin) +{ + u16 i; + + if (scl_pin) { + /* Send high and low on the SCL line */ + for (i = 0; i < 9; i++) { + gpio_set_value(scl_pin, 0); + udelay(20); + gpio_set_value(scl_pin, 1); + udelay(20); + } + } +} + +/* This routine does i2c bus recovery as specified in the + * i2c protocol Rev. 03 section 3.16 titled "Bus clear" */ -static int i2c_davinci_init(struct davinci_i2c_dev *dev) +static void i2c_recover_bus(struct davinci_i2c_dev *dev) +{ + u32 flag = 0; + struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; + + dev_err(dev->dev, "initiating i2c bus recovery\n"); + /* Send NACK to the slave */ + flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); + flag |= DAVINCI_I2C_MDR_NACK; + /* write the data into mode register */ + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); + if (pdata) + generic_i2c_clock_pulse(pdata->scl_pin); + /* Send STOP */ + flag = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); + flag |= DAVINCI_I2C_MDR_STP; + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); +} + +static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev, + int val) +{ + u16 w; + + w = davinci_i2c_read_reg(i2c_dev, DAVINCI_I2C_MDR_REG); + if (!val) /* put I2C into reset */ + w &= ~DAVINCI_I2C_MDR_IRS; + else /* take I2C out of reset */ + w |= DAVINCI_I2C_MDR_IRS; + + davinci_i2c_write_reg(i2c_dev, DAVINCI_I2C_MDR_REG, w); +} + +static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) { struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; u16 psc; @@ -148,15 +194,6 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev) u32 clkh; u32 clkl; u32 input_clock = clk_get_rate(dev->clk); - u16 w; - - if (!pdata) - pdata = &davinci_i2c_platform_data_default; - - /* put I2C into reset */ - w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); - MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 0); - davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); /* NOTE: I2C Clock divider programming info * As per I2C specs the following formulas provide prescaler @@ -188,12 +225,32 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev) davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKH_REG, clkh); davinci_i2c_write_reg(dev, DAVINCI_I2C_CLKL_REG, clkl); + dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk); +} + +/* + * This function configures I2C and brings I2C out of reset. + * This function is called during I2C init function. This function + * also gets called if I2C encounters any errors. + */ +static int i2c_davinci_init(struct davinci_i2c_dev *dev) +{ + struct davinci_i2c_platform_data *pdata = dev->dev->platform_data; + + if (!pdata) + pdata = &davinci_i2c_platform_data_default; + + /* put I2C into reset */ + davinci_i2c_reset_ctrl(dev, 0); + + /* compute clock dividers */ + i2c_davinci_calc_clk_dividers(dev); + /* Respond at reserved "SMBus Host" slave address" (and zero); * we seem to have no option to not respond... 
*/ davinci_i2c_write_reg(dev, DAVINCI_I2C_OAR_REG, 0x08); - dev_dbg(dev->dev, "input_clock = %d, CLK = %d\n", input_clock, clk); dev_dbg(dev->dev, "PSC = %d\n", davinci_i2c_read_reg(dev, DAVINCI_I2C_PSC_REG)); dev_dbg(dev->dev, "CLKL = %d\n", @@ -204,9 +261,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev) pdata->bus_freq, pdata->bus_delay); /* Take the I2C module out of reset: */ - w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); - MOD_REG_BIT(w, DAVINCI_I2C_MDR_IRS, 1); - davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); + davinci_i2c_reset_ctrl(dev, 1); /* Enable interrupts */ davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, I2C_DAVINCI_INTR_ALL); @@ -221,14 +276,22 @@ static int i2c_davinci_wait_bus_not_busy(struct davinci_i2c_dev *dev, char allow_sleep) { unsigned long timeout; + static u16 to_cnt; timeout = jiffies + dev->adapter.timeout; while (davinci_i2c_read_reg(dev, DAVINCI_I2C_STR_REG) & DAVINCI_I2C_STR_BB) { - if (time_after(jiffies, timeout)) { - dev_warn(dev->dev, - "timeout waiting for bus ready\n"); - return -ETIMEDOUT; + if (to_cnt <= DAVINCI_I2C_MAX_TRIES) { + if (time_after(jiffies, timeout)) { + dev_warn(dev->dev, + "timeout waiting for bus ready\n"); + to_cnt++; + return -ETIMEDOUT; + } else { + to_cnt = 0; + i2c_recover_bus(dev); + i2c_davinci_init(dev); + } } if (allow_sleep) schedule_timeout(1); @@ -250,9 +313,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) u16 w; int r; - if (msg->len == 0) - return -EINVAL; - if (!pdata) pdata = &davinci_i2c_platform_data_default; /* Introduce a delay, required for some boards (e.g Davinci EVM) */ @@ -264,6 +324,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) dev->buf = msg->buf; dev->buf_len = msg->len; + dev->stop = stop; davinci_i2c_write_reg(dev, DAVINCI_I2C_CNT_REG, dev->buf_len); @@ -281,23 +342,40 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) flag |= DAVINCI_I2C_MDR_TRX; if (stop) flag |= DAVINCI_I2C_MDR_STP; + if (msg->len == 0) { + flag |= DAVINCI_I2C_MDR_RM; + flag &= ~DAVINCI_I2C_MDR_STP; + } /* Enable receive or transmit interrupts */ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); if (msg->flags & I2C_M_RD) - MOD_REG_BIT(w, DAVINCI_I2C_IMR_RRDY, 1); + w |= DAVINCI_I2C_IMR_RRDY; else - MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 1); + w |= DAVINCI_I2C_IMR_XRDY; davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w); dev->terminate = 0; + /* write the data into mode register */ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); + /* + * First byte should be set here, not after interrupt, + * because transmit-data-ready interrupt can come before + * NACK-interrupt during sending of previous message and + * ICDXR may have wrong data + */ + if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { + davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); + dev->buf_len--; + } + r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, dev->adapter.timeout); if (r == 0) { dev_err(dev->dev, "controller timed out\n"); + i2c_recover_bus(dev); i2c_davinci_init(dev); dev->buf_len = 0; return -ETIMEDOUT; @@ -334,7 +412,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) return msg->len; if (stop) { w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); - MOD_REG_BIT(w, DAVINCI_I2C_MDR_STP, 1); + w |= DAVINCI_I2C_MDR_STP; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); } return -EREMOTEIO; @@ -367,12 +445,17 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], 
int num) if (ret < 0) return ret; } + +#ifdef CONFIG_CPU_FREQ + complete(&dev->xfr_complete); +#endif + return num; } static u32 i2c_davinci_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static void terminate_read(struct davinci_i2c_dev *dev) @@ -431,6 +514,14 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) case DAVINCI_I2C_IVR_ARDY: davinci_i2c_write_reg(dev, DAVINCI_I2C_STR_REG, DAVINCI_I2C_STR_ARDY); + if (((dev->buf_len == 0) && (dev->stop != 0)) || + (dev->cmd_err & DAVINCI_I2C_STR_NACK)) { + w = davinci_i2c_read_reg(dev, + DAVINCI_I2C_MDR_REG); + w |= DAVINCI_I2C_MDR_STP; + davinci_i2c_write_reg(dev, + DAVINCI_I2C_MDR_REG, w); + } complete(&dev->cmd_complete); break; @@ -462,7 +553,7 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); - MOD_REG_BIT(w, DAVINCI_I2C_IMR_XRDY, 0); + w &= ~DAVINCI_I2C_IMR_XRDY; davinci_i2c_write_reg(dev, DAVINCI_I2C_IMR_REG, w); @@ -491,6 +582,48 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id) return count ? IRQ_HANDLED : IRQ_NONE; } +#ifdef CONFIG_CPU_FREQ +static int i2c_davinci_cpufreq_transition(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct davinci_i2c_dev *dev; + + dev = container_of(nb, struct davinci_i2c_dev, freq_transition); + if (val == CPUFREQ_PRECHANGE) { + wait_for_completion(&dev->xfr_complete); + davinci_i2c_reset_ctrl(dev, 0); + } else if (val == CPUFREQ_POSTCHANGE) { + i2c_davinci_calc_clk_dividers(dev); + davinci_i2c_reset_ctrl(dev, 1); + } + + return 0; +} + +static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev) +{ + dev->freq_transition.notifier_call = i2c_davinci_cpufreq_transition; + + return cpufreq_register_notifier(&dev->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} + +static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev) +{ + cpufreq_unregister_notifier(&dev->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +} +#else +static inline int i2c_davinci_cpufreq_register(struct davinci_i2c_dev *dev) +{ + return 0; +} + +static inline void i2c_davinci_cpufreq_deregister(struct davinci_i2c_dev *dev) +{ +} +#endif + static struct i2c_algorithm i2c_davinci_algo = { .master_xfer = i2c_davinci_xfer, .functionality = i2c_davinci_func, @@ -530,6 +663,9 @@ static int davinci_i2c_probe(struct platform_device *pdev) } init_completion(&dev->cmd_complete); +#ifdef CONFIG_CPU_FREQ + init_completion(&dev->xfr_complete); +#endif dev->dev = get_device(&pdev->dev); dev->irq = irq->start; platform_set_drvdata(pdev, dev); @@ -541,7 +677,12 @@ static int davinci_i2c_probe(struct platform_device *pdev) } clk_enable(dev->clk); - dev->base = (void __iomem *)IO_ADDRESS(mem->start); + dev->base = ioremap(mem->start, resource_size(mem)); + if (!dev->base) { + r = -EBUSY; + goto err_mem_ioremap; + } + i2c_davinci_init(dev); r = request_irq(dev->irq, i2c_davinci_isr, 0, pdev->name, dev); @@ -550,6 +691,12 @@ static int davinci_i2c_probe(struct platform_device *pdev) goto err_unuse_clocks; } + r = i2c_davinci_cpufreq_register(dev); + if (r) { + dev_err(&pdev->dev, "failed to register cpufreq\n"); + goto err_free_irq; + } + adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; @@ -571,6 +718,8 @@ static int davinci_i2c_probe(struct platform_device *pdev) err_free_irq: free_irq(dev->irq, dev); err_unuse_clocks: + iounmap(dev->base); +err_mem_ioremap: 
clk_disable(dev->clk); clk_put(dev->clk); dev->clk = NULL; @@ -589,6 +738,8 @@ static int davinci_i2c_remove(struct platform_device *pdev) struct davinci_i2c_dev *dev = platform_get_drvdata(pdev); struct resource *mem; + i2c_davinci_cpufreq_deregister(dev); + platform_set_drvdata(pdev, NULL); i2c_del_adapter(&dev->adapter); put_device(&pdev->dev); @@ -599,6 +750,7 @@ static int davinci_i2c_remove(struct platform_device *pdev) davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0); free_irq(IRQ_I2C, dev); + iounmap(dev->base); kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -606,6 +758,41 @@ static int davinci_i2c_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int davinci_i2c_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + /* put I2C into reset */ + davinci_i2c_reset_ctrl(i2c_dev, 0); + clk_disable(i2c_dev->clk); + + return 0; +} + +static int davinci_i2c_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct davinci_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + clk_enable(i2c_dev->clk); + /* take I2C out of reset */ + davinci_i2c_reset_ctrl(i2c_dev, 1); + + return 0; +} + +static const struct dev_pm_ops davinci_i2c_pm = { + .suspend = davinci_i2c_suspend, + .resume = davinci_i2c_resume, +}; + +#define davinci_i2c_pm_ops (&davinci_i2c_pm) +#else +#define davinci_i2c_pm_ops NULL +#endif + /* work with hotplug and coldplug */ MODULE_ALIAS("platform:i2c_davinci"); @@ -615,6 +802,7 @@ static struct platform_driver davinci_i2c_driver = { .driver = { .name = "i2c_davinci", .owner = THIS_MODULE, + .pm = davinci_i2c_pm_ops, }, }; diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c new file mode 100644 index 00000000000..92d770d7bbc --- /dev/null +++ b/drivers/i2c/busses/i2c-nuc900.c @@ -0,0 +1,709 @@ +/* + * linux/drivers/i2c/busses/i2c-nuc900.c + * + * Copyright (c) 2010 Nuvoton technology corporation. + * + * This driver based on S3C2410 I2C driver of Ben Dooks <ben-Y5A6D6n0/KfQXOPxS62xeg@public.gmane.org>. + * Written by Wan ZongShun <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation;version 2 of the License. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/i2c.h> +#include <linux/i2c-id.h> +#include <linux/init.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/cpufreq.h> +#include <linux/slab.h> +#include <linux/io.h> + +#include <mach/mfp.h> +#include <mach/i2c.h> + +/* nuc900 i2c registers offset */ + +#define CSR 0x00 +#define DIVIDER 0x04 +#define CMDR 0x08 +#define SWR 0x0C +#define RXR 0x10 +#define TXR 0x14 + +/* nuc900 i2c CSR register bits */ + +#define IRQEN 0x003 +#define I2CBUSY 0x400 +#define I2CSTART 0x018 +#define IRQFLAG 0x004 +#define ARBIT_LOST 0x200 +#define SLAVE_ACK 0x800 + +/* nuc900 i2c CMDR register bits */ + +#define I2C_CMD_START 0x10 +#define I2C_CMD_STOP 0x08 +#define I2C_CMD_READ 0x04 +#define I2C_CMD_WRITE 0x02 +#define I2C_CMD_NACK 0x01 + +/* i2c controller state */ + +enum nuc900_i2c_state { + STATE_IDLE, + STATE_START, + STATE_READ, + STATE_WRITE, + STATE_STOP +}; + +/* i2c controller private data */ + +struct nuc900_i2c { + spinlock_t lock; + wait_queue_head_t wait; + + struct i2c_msg *msg; + unsigned int msg_num; + unsigned int msg_idx; + unsigned int msg_ptr; + unsigned int irq; + + enum nuc900_i2c_state state; + + void __iomem *regs; + struct clk *clk; + struct device *dev; + struct resource *ioarea; + struct i2c_adapter adap; +}; + +/* nuc900_i2c_master_complete + * + * complete the message and wake up the caller, using the given return code, + * or zero to mean ok. +*/ + +static inline void nuc900_i2c_master_complete(struct nuc900_i2c *i2c, int ret) +{ + dev_dbg(i2c->dev, "master_complete %d\n", ret); + + i2c->msg_ptr = 0; + i2c->msg = NULL; + i2c->msg_idx++; + i2c->msg_num = 0; + if (ret) + i2c->msg_idx = ret; + + wake_up(&i2c->wait); +} + +/* irq enable/disable functions */ + +static inline void nuc900_i2c_disable_irq(struct nuc900_i2c *i2c) +{ + unsigned long tmp; + + tmp = readl(i2c->regs + CSR); + writel(tmp & ~IRQEN, i2c->regs + CSR); +} + +static inline void nuc900_i2c_enable_irq(struct nuc900_i2c *i2c) +{ + unsigned long tmp; + + tmp = readl(i2c->regs + CSR); + writel(tmp | IRQEN, i2c->regs + CSR); +} + + +/* nuc900_i2c_message_start + * + * put the start of a message onto the bus +*/ + +static void nuc900_i2c_message_start(struct nuc900_i2c *i2c, + struct i2c_msg *msg) +{ + unsigned int addr = (msg->addr & 0x7f) << 1; + + if (msg->flags & I2C_M_RD) + addr |= 0x1; + writel(addr & 0xff, i2c->regs + TXR); + writel(I2C_CMD_START | I2C_CMD_WRITE, i2c->regs + CMDR); +} + +static inline void nuc900_i2c_stop(struct nuc900_i2c *i2c, int ret) +{ + + dev_dbg(i2c->dev, "STOP\n"); + + /* stop the transfer */ + i2c->state = STATE_STOP; + writel(I2C_CMD_STOP, i2c->regs + CMDR); + + nuc900_i2c_master_complete(i2c, ret); + nuc900_i2c_disable_irq(i2c); +} + +/* helper functions to determine the current state in the set of + * messages we are sending +*/ + +/* is_lastmsg() + * + * returns TRUE if the current message is the last in the set +*/ + +static inline int is_lastmsg(struct nuc900_i2c *i2c) +{ + return i2c->msg_idx >= (i2c->msg_num - 1); +} + +/* is_msglast + * + * returns TRUE if we this is the last byte in the current message +*/ + +static inline int is_msglast(struct nuc900_i2c *i2c) +{ + return i2c->msg_ptr == i2c->msg->len-1; +} + +/* is_msgend + * + * returns TRUE if we reached the end of the current message +*/ + +static inline int is_msgend(struct nuc900_i2c 
*i2c) +{ + return i2c->msg_ptr >= i2c->msg->len; +} + +/* i2c_nuc900_irq_nextbyte + * + * process an interrupt and work out what to do + */ + +static void i2c_nuc900_irq_nextbyte(struct nuc900_i2c *i2c, + unsigned long iicstat) +{ + unsigned char byte; + + switch (i2c->state) { + + case STATE_IDLE: + dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__); + break; + + case STATE_STOP: + dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__); + nuc900_i2c_disable_irq(i2c); + break; + + case STATE_START: + /* last thing we did was send a start condition on the + * bus, or started a new i2c message + */ + + if (iicstat & SLAVE_ACK && + !(i2c->msg->flags & I2C_M_IGNORE_NAK)) { + /* ack was not received... */ + + dev_dbg(i2c->dev, "ack was not received\n"); + nuc900_i2c_stop(i2c, -ENXIO); + break; + } + + if (i2c->msg->flags & I2C_M_RD) + i2c->state = STATE_READ; + else + i2c->state = STATE_WRITE; + + /* terminate the transfer if there is nothing to do + * as this is used by the i2c probe to find devices. + */ + + if (is_lastmsg(i2c) && i2c->msg->len == 0) { + nuc900_i2c_stop(i2c, 0); + break; + } + + if (i2c->state == STATE_READ) + goto prepare_read; + + /* fall through to the write state, as we will need to + * send a byte as well + */ + + case STATE_WRITE: + /* we are writing data to the device... check for the + * end of the message, and if so, work out what to do + */ + + if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) { + if (iicstat & SLAVE_ACK) { + dev_dbg(i2c->dev, "WRITE: No Ack\n"); + + nuc900_i2c_stop(i2c, -ECONNREFUSED); + break; + } + } + +retry_write: + + if (!is_msgend(i2c)) { + byte = i2c->msg->buf[i2c->msg_ptr++]; + writeb(byte, i2c->regs + TXR); + writel(I2C_CMD_WRITE, i2c->regs + CMDR); + + } else if (!is_lastmsg(i2c)) { + /* we need to go to the next i2c message */ + + dev_dbg(i2c->dev, "WRITE: Next Message\n"); + + i2c->msg_ptr = 0; + i2c->msg_idx++; + i2c->msg++; + + /* check to see if we need to do another message */ + if (i2c->msg->flags & I2C_M_NOSTART) { + + if (i2c->msg->flags & I2C_M_RD) { + /* cannot do this, the controller + * forces us to send a new START + * when we change direction + */ + + nuc900_i2c_stop(i2c, -EINVAL); + } + + goto retry_write; + } else { + /* send the new start */ + nuc900_i2c_message_start(i2c, i2c->msg); + i2c->state = STATE_START; + } + + } else { + /* send stop */ + + nuc900_i2c_stop(i2c, 0); + } + break; + + case STATE_READ: + /* we have a byte of data in the data register, do + * something with it, and then work out wether we are + * going to do any more read/write + */ + + byte = readb(i2c->regs + RXR); + i2c->msg->buf[i2c->msg_ptr++] = byte; + +prepare_read: + if (is_msglast(i2c)) { + /* last byte of buffer */ + + if (is_lastmsg(i2c)) + writel(I2C_CMD_READ | I2C_CMD_NACK, + i2c->regs + CMDR); + + } else if (is_msgend(i2c)) { + /* ok, we've read the entire buffer, see if there + * is anything else we need to do + */ + + if (is_lastmsg(i2c)) { + /* last message, send stop and complete */ + dev_dbg(i2c->dev, "READ: Send Stop\n"); + + nuc900_i2c_stop(i2c, 0); + } else { + /* go to the next transfer */ + dev_dbg(i2c->dev, "READ: Next Transfer\n"); + + i2c->msg_ptr = 0; + i2c->msg_idx++; + i2c->msg++; + + writel(I2C_CMD_READ, i2c->regs + CMDR); + } + + } else { + writel(I2C_CMD_READ, i2c->regs + CMDR); + } + + break; + } +} + +/* nuc900_i2c_irq + * + * top level IRQ servicing routine +*/ + +static irqreturn_t nuc900_i2c_irq(int irqno, void *dev_id) +{ + struct nuc900_i2c *i2c = dev_id; + unsigned long status; + + status = readl(i2c->regs 
+ CSR); + writel(status | IRQFLAG, i2c->regs + CSR); + + if (status & ARBIT_LOST) { + /* deal with arbitration loss */ + dev_err(i2c->dev, "deal with arbitration loss\n"); + goto out; + } + + if (i2c->state == STATE_IDLE) { + dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n"); + goto out; + } + + /* pretty much this leaves us with the fact that we've + * transmitted or received whatever byte we last sent + */ + + i2c_nuc900_irq_nextbyte(i2c, status); + + out: + return IRQ_HANDLED; +} + + +/* nuc900_i2c_set_master + * + * get the i2c bus for a master transaction +*/ + +static int nuc900_i2c_set_master(struct nuc900_i2c *i2c) +{ + int timeout = 400; + + while (timeout-- > 0) { + if (((readl(i2c->regs + SWR) & I2CSTART) == I2CSTART) && + ((readl(i2c->regs + CSR) & I2CBUSY) == 0)) { + return 0; + } + + msleep(1); + } + + return -ETIMEDOUT; +} + +/* nuc900_i2c_doxfer + * + * this starts an i2c transfer +*/ + +static int nuc900_i2c_doxfer(struct nuc900_i2c *i2c, + struct i2c_msg *msgs, int num) +{ + unsigned long iicstat, timeout; + int spins = 20; + int ret; + + ret = nuc900_i2c_set_master(i2c); + if (ret != 0) { + dev_err(i2c->dev, "cannot get bus (error %d)\n", ret); + ret = -EAGAIN; + goto out; + } + + spin_lock_irq(&i2c->lock); + + i2c->msg = msgs; + i2c->msg_num = num; + i2c->msg_ptr = 0; + i2c->msg_idx = 0; + i2c->state = STATE_START; + + nuc900_i2c_message_start(i2c, msgs); + spin_unlock_irq(&i2c->lock); + + timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); + + ret = i2c->msg_idx; + + /* having these next two as dev_err() makes life very + * noisy when doing an i2cdetect + */ + + if (timeout == 0) + dev_dbg(i2c->dev, "timeout\n"); + else if (ret != num) + dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); + + /* ensure the stop has been through the bus */ + + dev_dbg(i2c->dev, "waiting for bus idle\n"); + + /* first, try busy waiting briefly */ + do { + iicstat = readl(i2c->regs + CSR); + } while ((iicstat & I2CBUSY) && --spins); + + /* if that timed out sleep */ + if (!spins) { + msleep(1); + iicstat = readl(i2c->regs + CSR); + } + + if (iicstat & I2CBUSY) + dev_warn(i2c->dev, "timeout waiting for bus idle\n"); + + out: + return ret; +} + +/* nuc900_i2c_xfer + * + * first port of call from the i2c bus code when an message needs + * transferring across the i2c bus. 
+*/ + +static int nuc900_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct nuc900_i2c *i2c = (struct nuc900_i2c *)adap->algo_data; + int retry; + int ret; + + nuc900_i2c_enable_irq(i2c); + + for (retry = 0; retry < adap->retries; retry++) { + + ret = nuc900_i2c_doxfer(i2c, msgs, num); + + if (ret != -EAGAIN) + return ret; + + dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry); + + udelay(100); + } + + return -EREMOTEIO; +} + +/* declare our i2c functionality */ +static u32 nuc900_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; +} + +/* i2c bus registration info */ + +static const struct i2c_algorithm nuc900_i2c_algorithm = { + .master_xfer = nuc900_i2c_xfer, + .functionality = nuc900_i2c_func, +}; + +/* nuc900_i2c_probe + * + * called by the bus driver when a suitable device is found +*/ + +static int __devinit nuc900_i2c_probe(struct platform_device *pdev) +{ + struct nuc900_i2c *i2c; + struct nuc900_platform_i2c *pdata; + struct resource *res; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data\n"); + return -EINVAL; + } + + i2c = kzalloc(sizeof(struct nuc900_i2c), GFP_KERNEL); + if (!i2c) { + dev_err(&pdev->dev, "no memory for state\n"); + return -ENOMEM; + } + + strlcpy(i2c->adap.name, "nuc900-i2c0", sizeof(i2c->adap.name)); + i2c->adap.owner = THIS_MODULE; + i2c->adap.algo = &nuc900_i2c_algorithm; + i2c->adap.retries = 2; + i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; + + spin_lock_init(&i2c->lock); + init_waitqueue_head(&i2c->wait); + + /* find the clock and enable it */ + + i2c->dev = &pdev->dev; + i2c->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + ret = -ENOENT; + goto err_noclk; + } + + dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk); + + clk_enable(i2c->clk); + + /* map the registers */ + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "cannot find IO resource\n"); + ret = -ENOENT; + goto err_clk; + } + + i2c->ioarea = request_mem_region(res->start, resource_size(res), + pdev->name); + + if (i2c->ioarea == NULL) { + dev_err(&pdev->dev, "cannot request IO\n"); + ret = -ENXIO; + goto err_clk; + } + + i2c->regs = ioremap(res->start, resource_size(res)); + + if (i2c->regs == NULL) { + dev_err(&pdev->dev, "cannot map IO\n"); + ret = -ENXIO; + goto err_ioarea; + } + + dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", + i2c->regs, i2c->ioarea, res); + + /* setup info block for the i2c core */ + + i2c->adap.algo_data = i2c; + i2c->adap.dev.parent = &pdev->dev; + + mfp_set_groupg(&pdev->dev); + + clk_get_rate(i2c->clk); + + ret = (i2c->clk.apbfreq)/(pdata->bus_freq * 5) - 1; + writel(ret & 0xffff, i2c->regs + DIVIDER); + + /* find the IRQ for this unit (note, this relies on the init call to + * ensure no current IRQs pending + */ + + i2c->irq = ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(&pdev->dev, "cannot find IRQ\n"); + goto err_iomap; + } + + ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_DISABLED | IRQF_SHARED, + dev_name(&pdev->dev), i2c); + + if (ret != 0) { + dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); + goto err_iomap; + } + + /* Note, previous versions of the driver used i2c_add_adapter() + * to add the bus at any number. We now pass the bus number via + * the platform data, so if unset it will now default to always + * being bus 0. 
+ */ + + i2c->adap.nr = pdata->bus_num; + + ret = i2c_add_numbered_adapter(&i2c->adap); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + goto err_irq; + } + + platform_set_drvdata(pdev, i2c); + + dev_info(&pdev->dev, "%s: NUC900 I2C adapter\n", + dev_name(&i2c->adap.dev)); + return 0; + + err_irq: + free_irq(i2c->irq, i2c); + + err_iomap: + iounmap(i2c->regs); + + err_ioarea: + release_resource(i2c->ioarea); + kfree(i2c->ioarea); + + err_clk: + clk_disable(i2c->clk); + clk_put(i2c->clk); + + err_noclk: + kfree(i2c); + return ret; +} + +/* nuc900_i2c_remove + * + * called when device is removed from the bus +*/ + +static int __devexit nuc900_i2c_remove(struct platform_device *pdev) +{ + struct nuc900_i2c *i2c = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c->adap); + free_irq(i2c->irq, i2c); + + clk_disable(i2c->clk); + clk_put(i2c->clk); + + iounmap(i2c->regs); + + release_resource(i2c->ioarea); + kfree(i2c->ioarea); + kfree(i2c); + + return 0; +} + +static struct platform_driver nuc900_i2c_driver = { + .probe = nuc900_i2c_probe, + .remove = __devexit_p(nuc900_i2c_remove), + .driver = { + .owner = THIS_MODULE, + .name = "nuc900-i2c0", + }, +}; + +static int __init i2c_adap_nuc900_init(void) +{ + return platform_driver_register(&nuc900_i2c_driver); +} + +static void __exit i2c_adap_nuc900_exit(void) +{ + platform_driver_unregister(&nuc900_i2c_driver); +} +subsys_initcall(i2c_adap_nuc900_init); +module_exit(i2c_adap_nuc900_exit); + +MODULE_DESCRIPTION("NUC900 I2C Bus driver"); +MODULE_AUTHOR("Wan ZongShun, <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:nuc900-i2c0"); diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 020ff23d762..c94e51b2651 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1001,7 +1001,7 @@ static int i2c_pxa_probe(struct platform_device *dev) struct pxa_i2c *i2c; struct resource *res; struct i2c_pxa_platform_data *plat = dev->dev.platform_data; - struct platform_device_id *id = platform_get_device_id(dev); + const struct platform_device_id *id = platform_get_device_id(dev); int ret; int irq; diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig index fb5c5186d4a..8489eb58a52 100644 --- a/drivers/idle/Kconfig +++ b/drivers/idle/Kconfig @@ -1,9 +1,8 @@ config INTEL_IDLE - tristate "Cpuidle Driver for Intel Processors" + bool "Cpuidle Driver for Intel Processors" depends on CPU_IDLE depends on X86 depends on CPU_SUP_INTEL - depends on EXPERIMENTAL help Enable intel_idle, a cpuidle driver that includes knowledge of native Intel hardware idle features. The acpi_idle driver diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 03d202b1ff2..a10152bb142 100755 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -77,10 +77,8 @@ static struct cpuidle_driver intel_idle_driver = { }; /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; -static int power_policy = 7; /* 0 = max perf; 15 = max powersave */ -static unsigned int substates; -static int (*choose_substate)(int); +static unsigned int mwait_substates; /* Reliable LAPIC Timer States, bit 1 for C1 etc. 
*/ static unsigned int lapic_timer_reliable_states; @@ -168,41 +166,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { .enter = NULL }, /* disabled */ }; -/* - * choose_tunable_substate() - * - * Run-time decision on which C-state substate to invoke - * If power_policy = 0, choose shallowest substate (0) - * If power_policy = 15, choose deepest substate - * If power_policy = middle, choose middle substate etc. - */ -static int choose_tunable_substate(int cstate) -{ - unsigned int num_substates; - unsigned int substate_choice; - - power_policy &= 0xF; /* valid range: 0-15 */ - cstate &= 7; /* valid range: 0-7 */ - - num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; - - if (num_substates <= 1) - return 0; - - substate_choice = ((power_policy + (power_policy + 1) * - (num_substates - 1)) / 16); - - return substate_choice; -} - -/* - * choose_zero_substate() - */ -static int choose_zero_substate(int cstate) -{ - return 0; -} - /** * intel_idle * @dev: cpuidle_device @@ -220,8 +183,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; - eax = eax + (choose_substate)(cstate); - local_irq_disable(); if (!(lapic_timer_reliable_states & (1 << (cstate)))) @@ -259,7 +220,7 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) */ static int intel_idle_probe(void) { - unsigned int eax, ebx, ecx, edx; + unsigned int eax, ebx, ecx; if (max_cstate == 0) { pr_debug(PREFIX "disabled\n"); @@ -275,17 +236,13 @@ static int intel_idle_probe(void) if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return -ENODEV; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) return -ENODEV; -#ifdef DEBUG - if (substates == 0) /* can over-ride via modparam */ -#endif - substates = edx; - pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates); + pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = 0xFFFFFFFF; @@ -299,18 +256,18 @@ static int intel_idle_probe(void) case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ case 0x1F: /* Core i7 and i5 Processor - Nehalem */ case 0x2E: /* Nehalem-EX Xeon */ + case 0x2F: /* Westmere-EX Xeon */ lapic_timer_reliable_states = (1 << 1); /* C1 */ case 0x25: /* Westmere */ case 0x2C: /* Westmere */ cpuidle_state_table = nehalem_cstates; - choose_substate = choose_tunable_substate; break; case 0x1C: /* 28 - Atom Processor */ + case 0x26: /* 38 - Lincroft Atom Processor */ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ cpuidle_state_table = atom_cstates; - choose_substate = choose_zero_substate; break; #ifdef FUTURE_USE case 0x17: /* 23 - Core 2 Duo */ @@ -376,7 +333,7 @@ static int intel_idle_cpuidle_devices_init(void) } /* does the state exist in CPUID.MWAIT? 
*/ - num_substates = (substates >> ((cstate) * 4)) + num_substates = (mwait_substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; if (num_substates == 0) continue; @@ -450,11 +407,7 @@ static void __exit intel_idle_exit(void) module_init(intel_idle_init); module_exit(intel_idle_exit); -module_param(power_policy, int, 0644); module_param(max_cstate, int, 0444); -#ifdef DEBUG -module_param(substates, int, 0444); -#endif MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index d0dc1db80b2..50815022cff 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso) if (recv->block_irq_interval * 4 > iso->buf_packets) recv->block_irq_interval = iso->buf_packets / 4; if (recv->block_irq_interval < 1) - recv->block_irq_interval = 1; + recv->block_irq_interval = 1; /* choose a buffer stride */ /* must be a power of 2, and <= PAGE_SIZE */ diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c index e90694fe0d5..0bc86204213 100644 --- a/drivers/input/joystick/amijoy.c +++ b/drivers/input/joystick/amijoy.c @@ -139,7 +139,7 @@ static int __init amijoy_init(void) amijoy_dev[i]->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); for (j = 0; j < 2; j++) { - XXinput_set_abs_params(amijoy_dev[i], ABS_X + j, + input_set_abs_params(amijoy_dev[i], ABS_X + j, -1, 1, 0, 0); } diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index ddd5afd301d..19fa94af207 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr) if (absdev) { val = lo + (hi << 8); #ifdef TABLET_AUTOADJUST - if (val < input_abs_min(dev, ABS_X + i)) + if (val < input_abs_get_min(dev, ABS_X + i)) input_abs_set_min(dev, ABS_X + i, val); - if (val > input_abs_max(dev, ABS_X + i)) - XXinput_abs_set_max(dev, ABS_X + i, val); + if (val > input_abs_get_max(dev, ABS_X + i)) + input_abs_set_max(dev, ABS_X + i, val); #endif if (i % 3) - val = input_abs_max(dev, ABS_X + i) - val; + val = input_abs_get_max(dev, ABS_X + i) - val; input_report_abs(dev, ABS_X + i, val); } else { val = (int) (((int8_t) lo) | ((int8_t) hi << 8)); @@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr) #ifdef TABLET_AUTOADJUST for (i = 0; i < ABS_MAX; i++) { - int diff = input_abs_max(input_dev, ABS_X + i) / 10; + int diff = input_abs_get_max(input_dev, ABS_X + i) / 10; input_abs_set_min(input_dev, ABS_X + i, - input_abs_min(input_dev, ABS_X + i) + diff) - XXinput_abs_set_max(input_dev, ABS_X + i, - input_abs_max(input_dev, ABS_X + i) - diff) + input_abs_get_min(input_dev, ABS_X + i) + diff); + input_abs_set_max(input_dev, ABS_X + i, + input_abs_get_max(input_dev, ABS_X + i) - diff); } #endif diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index 0e53b3bc39a..f32404f9918 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev) clk_put(keypad->clk); input_unregister_device(keypad->input_dev); - input_free_device(keypad->input_dev); - iounmap(keypad->mmio_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/input/misc/uinput.c 
b/drivers/input/misc/uinput.c index bb53fd33cd1..0d4266a533a 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = { .minor = UINPUT_MINOR, .name = UINPUT_NAME, }; +MODULE_ALIAS_MISCDEV(UINPUT_MINOR); +MODULE_ALIAS("devname:" UINPUT_NAME); static int __init uinput_init(void) { diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 83c24cca234..d528a2dba06 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev, fx(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { - size = input_abs_get_min(dev, ABS_X) - - input_abs_get_max(dev, ABS_X); + size = input_abs_get_max(dev, ABS_X) - + input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; @@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev, fy(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { /* use X size for ABS_Y to keep the same scale */ - size = input_abs_get_min(dev, ABS_X) - - input_abs_get_max(dev, ABS_X); + size = input_abs_get_max(dev, ABS_X) - + input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig index 5dbcbe3a54a..b99b906ea9b 100644 --- a/drivers/isdn/hardware/avm/Kconfig +++ b/drivers/isdn/hardware/avm/Kconfig @@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA config ISDN_DRV_AVMB1_B1PCMCIA tristate "AVM B1/M1/M2 PCMCIA support" + depends on PCMCIA help Enable support for the PCMCIA version of the AVM B1 card. config ISDN_DRV_AVMB1_AVM_CS tristate "AVM B1/M1/M2 PCMCIA cs module" - depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA + depends on ISDN_DRV_AVMB1_B1PCMCIA help Enable the PCMCIA client driver for the AVM B1/M1/M2 PCMCIA cards. 
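The hil_kbd and mousedev hunks above move from the older open-coded axis fields to the input_abs_get_min()/input_abs_get_max() accessors. A minimal sketch of how those accessors pair with input_set_abs_params(); the device pointer and the 0..1023 range are invented for illustration and do not come from either driver:

#include <linux/input.h>

/* Declare an absolute X axis; 0..1023 with no fuzz/flat is an assumed range. */
static void example_setup_axis(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_X, 0, 1023, 0, 0);
}

/* Read the range back through the accessors, as the mousedev hunk now does. */
static int example_axis_span(struct input_dev *dev)
{
	return input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X);
}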
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 35bc2737412..2d17e76066b 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -45,6 +45,7 @@ #include <linux/syscalls.h> #include <linux/suspend.h> #include <linux/cpu.h> +#include <linux/compat.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> @@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp, return ret; } +#ifdef CONFIG_COMPAT +#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t) +#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t) +#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t) +#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t) +#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t) +#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t) + +static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg) +{ + switch (cmd) { + case PMU_IOC_SLEEP: + break; + case PMU_IOC_GET_BACKLIGHT32: + cmd = PMU_IOC_GET_BACKLIGHT; + break; + case PMU_IOC_SET_BACKLIGHT32: + cmd = PMU_IOC_SET_BACKLIGHT; + break; + case PMU_IOC_GET_MODEL32: + cmd = PMU_IOC_GET_MODEL; + break; + case PMU_IOC_HAS_ADB32: + cmd = PMU_IOC_HAS_ADB; + break; + case PMU_IOC_CAN_SLEEP32: + cmd = PMU_IOC_CAN_SLEEP; + break; + case PMU_IOC_GRAB_BACKLIGHT32: + cmd = PMU_IOC_GRAB_BACKLIGHT; + break; + default: + return -ENOIOCTLCMD; + } + return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + static const struct file_operations pmu_device_fops = { .read = pmu_read, .write = pmu_write, .poll = pmu_fpoll, .unlocked_ioctl = pmu_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_pmu_ioctl, +#endif .open = pmu_open, .release = pmu_release, }; diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore deleted file mode 100644 index a7afec6b19c..00000000000 --- a/drivers/md/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -mktables -raid6altivec*.c -raid6int*.c -raid6tables.c diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 1ba1e122e94..ed4900ade93 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) atomic_read(&bitmap->mddev->recovery_active) == 0); bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; - if (bitmap->mddev->persistent) - set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); + set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 11567c7999a..43cf9cc9c1d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares) * with the rest of the array) */ mdk_rdev_t *rdev; - - /* First make sure individual recovery_offsets are correct */ - list_for_each_entry(rdev, &mddev->disks, same_set) { - if (rdev->raid_disk >= 0 && - mddev->delta_disks >= 0 && - !test_bit(In_sync, &rdev->flags) && - mddev->curr_resync_completed > rdev->recovery_offset) - rdev->recovery_offset = mddev->curr_resync_completed; - - } list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->sb_events == mddev->events || (nospares && @@ -2167,13 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change) int sync_req; int nospares = 0; - mddev->utime = get_seconds(); - if (mddev->external) - return; repeat: + /* First make sure individual recovery_offsets are correct */ + 
list_for_each_entry(rdev, &mddev->disks, same_set) { + if (rdev->raid_disk >= 0 && + mddev->delta_disks >= 0 && + !test_bit(In_sync, &rdev->flags) && + mddev->curr_resync_completed > rdev->recovery_offset) + rdev->recovery_offset = mddev->curr_resync_completed; + + } + if (!mddev->persistent) { + clear_bit(MD_CHANGE_CLEAN, &mddev->flags); + clear_bit(MD_CHANGE_DEVS, &mddev->flags); + wake_up(&mddev->sb_wait); + return; + } + spin_lock_irq(&mddev->write_lock); - set_bit(MD_CHANGE_PENDING, &mddev->flags); + mddev->utime = get_seconds(); + if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) force_change = 1; if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) @@ -2221,19 +2225,6 @@ repeat: MD_BUG(); mddev->events --; } - - /* - * do not write anything to disk if using - * nonpersistent superblocks - */ - if (!mddev->persistent) { - if (!mddev->external) - clear_bit(MD_CHANGE_PENDING, &mddev->flags); - - spin_unlock_irq(&mddev->write_lock); - wake_up(&mddev->sb_wait); - return; - } sync_sbs(mddev, nospares); spin_unlock_irq(&mddev->write_lock); @@ -3379,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page) case 0: if (mddev->in_sync) st = clean; - else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) + else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) st = write_pending; else if (mddev->safemode) st = active_idle; @@ -3460,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) mddev->in_sync = 1; if (mddev->safemode == 1) mddev->safemode = 0; - if (mddev->persistent) - set_bit(MD_CHANGE_CLEAN, - &mddev->flags); + set_bit(MD_CHANGE_CLEAN, &mddev->flags); } err = 0; } else @@ -3474,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) case active: if (mddev->pers) { restart_array(mddev); - if (mddev->external) - clear_bit(MD_CHANGE_CLEAN, &mddev->flags); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); wake_up(&mddev->sb_wait); err = 0; } else { @@ -6580,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) if (mddev->in_sync) { mddev->in_sync = 0; set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_CHANGE_PENDING, &mddev->flags); md_wakeup_thread(mddev->thread); did_change = 1; } @@ -6588,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && !test_bit(MD_CHANGE_PENDING, &mddev->flags)); } @@ -6624,6 +6612,7 @@ int md_allow_write(mddev_t *mddev) if (mddev->in_sync) { mddev->in_sync = 0; set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_CHANGE_PENDING, &mddev->flags); if (mddev->safemode_delay && mddev->safemode == 0) mddev->safemode = 1; @@ -6633,7 +6622,7 @@ int md_allow_write(mddev_t *mddev) } else spin_unlock_irq(&mddev->write_lock); - if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) + if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) return -EAGAIN; else return 0; @@ -6831,8 +6820,7 @@ void md_do_sync(mddev_t *mddev) atomic_read(&mddev->recovery_active) == 0); mddev->curr_resync_completed = mddev->curr_resync; - if (mddev->persistent) - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_CHANGE_CLEAN, &mddev->flags); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } @@ -7111,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev) mddev->recovery_cp == MaxSector) { mddev->in_sync = 1; did_change = 1; - if (mddev->persistent) - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_CHANGE_CLEAN, &mddev->flags); } if (mddev->safemode == 1) mddev->safemode = 0; diff --git 
a/drivers/md/md.h b/drivers/md/md.h index a953fe2808a..3931299788d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -140,7 +140,7 @@ struct mddev_s unsigned long flags; #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ -#define MD_CHANGE_PENDING 2 /* superblock update in progress */ +#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ int suspended; atomic_t active_io; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 73cc74ffc26..ad83a4dcadc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) struct bio_list bl; struct page **behind_pages = NULL; const int rw = bio_data_dir(bio); - const bool do_sync = (bio->bi_rw & REQ_SYNC); - bool do_barriers; + const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); + unsigned long do_barriers; mdk_rdev_t *blocked_rdev; /* @@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev) { int i; conf_t *conf = mddev->private; + int count = 0; + unsigned long flags; /* * Find all failed disks within the RAID1 configuration @@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev) if (rdev && !test_bit(Faulty, &rdev->flags) && !test_and_set_bit(In_sync, &rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded--; - spin_unlock_irqrestore(&conf->device_lock, flags); + count++; + sysfs_notify_dirent(rdev->sysfs_state); } } + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded -= count; + spin_unlock_irqrestore(&conf->device_lock, flags); print_conf(conf); - return 0; + return count; } @@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev) * We already have a nr_pending reference on these rdevs. */ int i; - const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); + const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); clear_bit(R1BIO_BarrierRetry, &r1_bio->state); clear_bit(R1BIO_Barrier, &r1_bio->state); for (i=0; i < conf->raid_disks; i++) @@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev) (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { - const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; + const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; r1_bio->bios[r1_bio->read_disk] = mddev->ro ? 
IO_BLOCKED : NULL; r1_bio->read_disk = disk; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a88aeb5198c..84718383124 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) int i; int chunk_sects = conf->chunk_mask + 1; const int rw = bio_data_dir(bio); - const bool do_sync = (bio->bi_rw & REQ_SYNC); + const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); struct bio_list bl; unsigned long flags; mdk_rdev_t *blocked_rdev; @@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev) int i; conf_t *conf = mddev->private; mirror_info_t *tmp; + int count = 0; + unsigned long flags; /* * Find all non-in_sync disks within the RAID10 configuration @@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev) if (tmp->rdev && !test_bit(Faulty, &tmp->rdev->flags) && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded--; - spin_unlock_irqrestore(&conf->device_lock, flags); + count++; + sysfs_notify_dirent(tmp->rdev->sysfs_state); } } + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded -= count; + spin_unlock_irqrestore(&conf->device_lock, flags); print_conf(conf); - return 0; + return count; } @@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev) raid_end_bio_io(r10_bio); bio_put(bio); } else { - const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); + const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); bio_put(bio); rdev = conf->mirrors[mirror].rdev; if (printk_ratelimit()) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 866d4b5a144..69b0a169e43 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev) int i; raid5_conf_t *conf = mddev->private; struct disk_info *tmp; + int count = 0; + unsigned long flags; for (i = 0; i < conf->raid_disks; i++) { tmp = conf->disks + i; @@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev) && tmp->rdev->recovery_offset == MaxSector && !test_bit(Faulty, &tmp->rdev->flags) && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - mddev->degraded--; - spin_unlock_irqrestore(&conf->device_lock, flags); + count++; + sysfs_notify_dirent(tmp->rdev->sysfs_state); } } + spin_lock_irqsave(&conf->device_lock, flags); + mddev->degraded -= count; + spin_unlock_irqrestore(&conf->device_lock, flags); print_raid5_conf(conf); - return 0; + return count; } static int raid5_remove_disk(mddev_t *mddev, int number) diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig index decdeda840d..fd0830ed10d 100644 --- a/drivers/media/dvb/mantis/Kconfig +++ b/drivers/media/dvb/mantis/Kconfig @@ -1,6 +1,6 @@ config MANTIS_CORE tristate "Mantis/Hopper PCI bridge based devices" - depends on PCI && I2C && INPUT + depends on PCI && I2C && INPUT && IR_CORE help Support for PCI cards based on the Mantis and Hopper PCi bridge. 
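The raid1, raid10 and raid5 spare_active() hunks above all switch to the same pattern: count the newly in-sync members first, then fold the total into mddev->degraded under a single device_lock round trip and return the count. A rough sketch of that locking pattern with an invented structure (example_conf and its fields are stand-ins, not the real md/raid conf):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Invented per-array state; assume the lock was set up with spin_lock_init(). */
struct example_conf {
	spinlock_t lock;
	int degraded;		/* number of missing/out-of-sync members */
	int nr_members;
	bool in_sync[16];
};

/* Count the transitions first, then apply the total in one locked section. */
static int example_spare_active(struct example_conf *conf)
{
	unsigned long flags;
	int count = 0;
	int i;

	for (i = 0; i < conf->nr_members; i++) {
		if (!conf->in_sync[i]) {
			conf->in_sync[i] = true;
			count++;
		}
	}

	spin_lock_irqsave(&conf->lock, flags);
	conf->degraded -= count;
	spin_unlock_irqrestore(&conf->lock, flags);

	return count;
}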
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index d2f20c2acae..073f01390cd 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c @@ -228,11 +228,6 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (file->f_op->unlocked_ioctl) ret = file->f_op->unlocked_ioctl(file, cmd, arg); - else if (file->f_op->ioctl) { - lock_kernel(); - ret = file->f_op->ioctl(file->f_path.dentry->d_inode, file, cmd, arg); - unlock_kernel(); - } return ret; } @@ -973,7 +968,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOIOCTLCMD; - if (!file->f_op->ioctl && !file->f_op->unlocked_ioctl) + if (!file->f_op->unlocked_ioctl) return ret; switch (cmd) { diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index b8f1719d7c0..6837a8ef937 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -626,6 +626,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) * mpt_register - Register protocol-specific main callback handler. * @cbfunc: callback function pointer * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value) + * @func_name: call function's name * * This routine is called by a protocol-specific driver (SCSI host, * LAN, SCSI target) to register its reply callback routine. Each @@ -6559,7 +6560,7 @@ procmpt_destroy(void) } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary. */ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan); @@ -8003,6 +8004,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) * mpt_sas_log_info - Log information returned from SAS IOC. * @ioc: Pointer to MPT_ADAPTER structure * @log_info: U32 LogInfo reply word from the IOC + * @cb_idx: callback function's handle * * Refer to lsi/mpi_log_sas.h. **/ @@ -8049,7 +8051,7 @@ union loginfo_type { code_desc = ir_code_str[sas_loginfo.dw.code]; if (sas_loginfo.dw.subcode >= ARRAY_SIZE(raid_sub_code_str)) - break; + break; if (sas_loginfo.dw.code == 0) sub_code_desc = raid_sub_code_str[sas_loginfo.dw.subcode]; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 0efe631e50c..d80cfdc8edd 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); +#ifdef CONFIG_PM host->pm_notify.notifier_call = mmc_pm_notify; +#endif /* * By default, hosts do not support SGIO or large requests. diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index c997474c649..68d12794cfd 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX config MMC_SDHCI_S3C tristate "SDHCI support on Samsung S3C SoC" - depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX) + depends on MMC_SDHCI && PLAT_SAMSUNG help This selects the Secure Digital Host Controller Interface (SDHCI) often referrered to as the HSMMC block in some of the Samsung S3C @@ -256,12 +256,13 @@ config MMC_IMX If unsure, say N. 
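The two compat-ioctl hunks above (via-pmu gaining a .compat_ioctl handler, v4l2-compat-ioctl32 dropping the BKL ->ioctl fallback) converge on the same shape: one unlocked_ioctl entry point plus a thin 32-bit wrapper that converts the user pointer and re-dispatches. A hedged sketch of that shape; every name here (EXAMPLE_CMD, example_*) is invented for illustration:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/compat.h>
#include <linux/errno.h>

#define EXAMPLE_CMD	_IOR('E', 1, int)

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_CMD:
		return 0;	/* a real handler would act on 'arg' here */
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/* 32-bit entry point: translate the user pointer, then reuse the native path. */
static long example_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	return example_unlocked_ioctl(filp, cmd,
				      (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations example_fops = {
	.unlocked_ioctl	= example_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_compat_ioctl,
#endif
};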
-config MMC_MSM7X00A - tristate "Qualcomm MSM 7X00A SDCC Controller Support" - depends on MMC && ARCH_MSM && !ARCH_MSM7X30 +config MMC_MSM + tristate "Qualcomm SDCC Controller Support" + depends on MMC && ARCH_MSM help This provides support for the SD/MMC cell found in the - MSM 7X00A controllers from Qualcomm. + MSM and QSD SOCs from Qualcomm. The controller also has + support for SDIO devices. config MMC_MXC tristate "Freescale i.MX2/3 Multimedia Card Interface support" diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index fe0ba4e2b8b..840bcb52d82 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -21,7 +21,7 @@ obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o obj-$(CONFIG_MMC_AT91) += at91_mci.o obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o -obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o +obj-$(CONFIG_MMC_MSM) += msm_sdcc.o obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 1145ea0792e..62a35822003 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -182,7 +182,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) host->data_dma, sizeof(*host->data), DMA_FROM_DEVICE); - status = spi_sync(host->spi, &host->readback); + status = spi_sync_locked(host->spi, &host->readback); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, @@ -541,7 +541,7 @@ mmc_spi_command_send(struct mmc_spi_host *host, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); } - status = spi_sync(host->spi, &host->m); + status = spi_sync_locked(host->spi, &host->m); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, @@ -685,7 +685,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); - status = spi_sync(spi, &host->m); + status = spi_sync_locked(spi, &host->m); if (status != 0) { dev_dbg(&spi->dev, "write error (%d)\n", status); @@ -822,7 +822,7 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, DMA_FROM_DEVICE); } - status = spi_sync(spi, &host->m); + status = spi_sync_locked(spi, &host->m); if (host->dma_dev) { dma_sync_single_for_cpu(host->dma_dev, @@ -1018,7 +1018,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); - tmp = spi_sync(spi, &host->m); + tmp = spi_sync_locked(spi, &host->m); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, @@ -1084,6 +1084,9 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq) } #endif + /* request exclusive bus access */ + spi_bus_lock(host->spi->master); + /* issue command; then optionally data and stop */ status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL); if (status == 0 && mrq->data) { @@ -1094,6 +1097,9 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq) mmc_cs_off(host); } + /* release the bus */ + spi_bus_unlock(host->spi->master); + mmc_request_done(host->mmc, mrq); } @@ -1290,23 +1296,6 @@ mmc_spi_detect_irq(int irq, void *mmc) return IRQ_HANDLED; } -struct count_children { - unsigned n; - struct bus_type *bus; -}; - -static int maybe_count_child(struct device *dev, void *c) -{ - struct count_children *ccp = c; - - if (dev->bus == ccp->bus) { - if (ccp->n) - return -EBUSY; - ccp->n++; - } - return 0; -} - static int mmc_spi_probe(struct spi_device *spi) { void *ones; @@ -1338,32 
+1327,6 @@ static int mmc_spi_probe(struct spi_device *spi) return status; } - /* We can use the bus safely iff nobody else will interfere with us. - * Most commands consist of one SPI message to issue a command, then - * several more to collect its response, then possibly more for data - * transfer. Clocking access to other devices during that period will - * corrupt the command execution. - * - * Until we have software primitives which guarantee non-interference, - * we'll aim for a hardware-level guarantee. - * - * REVISIT we can't guarantee another device won't be added later... - */ - if (spi->master->num_chipselect > 1) { - struct count_children cc; - - cc.n = 0; - cc.bus = spi->dev.bus; - status = device_for_each_child(spi->dev.parent, &cc, - maybe_count_child); - if (status < 0) { - dev_err(&spi->dev, "can't share SPI bus\n"); - return status; - } - - dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n"); - } - /* We need a supply of ones to transmit. This is the only time * the CPU touches these, so cache coherency isn't a concern. * diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 6824917f5c6..ff7752348b1 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -160,18 +160,7 @@ msmsdcc_stop_data(struct msmsdcc_host *host) uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) { - switch (host->pdev_id) { - case 1: - return MSM_SDC1_PHYS + MMCIFIFO; - case 2: - return MSM_SDC2_PHYS + MMCIFIFO; - case 3: - return MSM_SDC3_PHYS + MMCIFIFO; - case 4: - return MSM_SDC4_PHYS + MMCIFIFO; - } - BUG(); - return 0; + return host->memres->start + MMCIFIFO; } static inline void @@ -1289,6 +1278,24 @@ msmsdcc_probe(struct platform_device *pdev) return ret; } +#ifdef CONFIG_PM +#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ +static void +do_resume_work(struct work_struct *work) +{ + struct msmsdcc_host *host = + container_of(work, struct msmsdcc_host, resume_task); + struct mmc_host *mmc = host->mmc; + + if (mmc) { + mmc_resume_host(mmc); + if (host->stat_irq) + enable_irq(host->stat_irq); + } +} +#endif + + static int msmsdcc_suspend(struct platform_device *dev, pm_message_t state) { @@ -1333,6 +1340,10 @@ msmsdcc_resume(struct platform_device *dev) } return 0; } +#else +#define msmsdcc_suspend 0 +#define msmsdcc_resume 0 +#endif static struct platform_driver msmsdcc_driver = { .probe = msmsdcc_probe, diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 0a7f2614c6f..71ad4163b95 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) { struct sdhci_host *host = platform_get_drvdata(dev); if (host) { - mutex_lock(&host->lock); + spin_lock(&host->lock); if (state) { dev_dbg(&dev->dev, "card inserted.\n"); host->flags &= ~SDHCI_DEVICE_DEAD; @@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) host->flags |= SDHCI_DEVICE_DEAD; host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } - sdhci_card_detect(host); - mutex_unlock(&host->lock); + tasklet_schedule(&host->card_tasklet); + spin_unlock(&host->lock); } } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 785512133b5..401527d273b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) else ctrl &= ~SDHCI_CTRL_4BITBUS; - if (ios->timing == MMC_TIMING_SD_HS) + if (ios->timing == MMC_TIMING_SD_HS && 
+ !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) ctrl |= SDHCI_CTRL_HISPD; else ctrl &= ~SDHCI_CTRL_HISPD; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 036cfae7636..d316bc79b63 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -245,6 +245,8 @@ struct sdhci_host { #define SDHCI_QUIRK_MISSING_CAPS (1<<27) /* Controller uses Auto CMD12 command to stop the transfer */ #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) +/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ +#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) int irq; /* Device IRQ */ void __iomem * ioaddr; /* Mapped address */ diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index f90941a785e..6f512b5c117 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -347,8 +347,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, spi_message_add_tail(&t[1], &m); /* Byte count starts at zero. */ - if (retlen) - *retlen = 0; + *retlen = 0; mutex_lock(&flash->lock); @@ -394,8 +393,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, dev_name(&flash->spi->dev), __func__, "to", (u32)to, len); - if (retlen) - *retlen = 0; + *retlen = 0; /* sanity checks */ if (!len) @@ -466,8 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, spi_sync(flash->spi, &m); - if (retlen) - *retlen += m.actual_length - m25p_cmdsz(flash); + *retlen += m.actual_length - m25p_cmdsz(flash); } } @@ -485,8 +482,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, size_t actual; int cmd_sz, ret; - if (retlen) - *retlen = 0; + *retlen = 0; /* sanity checks */ if (!len) @@ -797,7 +793,7 @@ static int __devinit m25p_probe(struct spi_device *spi) break; } - if (plat_id) + if (i < ARRAY_SIZE(m25p_ids) - 1) id = plat_id; else dev_warn(&spi->dev, "unrecognized id %s\n", data->type); diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 00af55d7afb..fe63f6bd663 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -22,6 +22,7 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/concat.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/slab.h> diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 618fb42b86b..532fe07cf88 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -56,7 +56,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting." INTR_STATUS0__ERASE_COMP) /* indicates whether or not the internal value for the flash bank is - valid or not */ + * valid or not */ #define CHIP_SELECT_INVALID -1 #define SUPPORT_8BITECC 1 @@ -71,7 +71,7 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting." #define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd) /* These constants are defined by the driver to enable common driver - configuration options. */ + * configuration options. */ #define SPARE_ACCESS 0x41 #define MAIN_ACCESS 0x42 #define MAIN_SPARE_ACCESS 0x43 @@ -97,7 +97,7 @@ static const struct pci_device_id denali_pci_ids[] = { /* these are static lookup tables that give us easy access to - registers in the NAND controller. + * registers in the NAND controller. 
*/ static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, INTR_STATUS1, @@ -119,9 +119,6 @@ static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP, INTR_STATUS2__RST_COMP, INTR_STATUS3__RST_COMP}; -/* specifies the debug level of the driver */ -static int nand_debug_level; - /* forward declarations */ static void clear_interrupts(struct denali_nand_info *denali); static uint32_t wait_for_irq(struct denali_nand_info *denali, @@ -130,22 +127,6 @@ static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask); static uint32_t read_interrupt_status(struct denali_nand_info *denali); -#define DEBUG_DENALI 0 - -/* This is a wrapper for writing to the denali registers. - * this allows us to create debug information so we can - * observe how the driver is programming the device. - * it uses standard linux convention for (val, addr) */ -static void denali_write32(uint32_t value, void *addr) -{ - iowrite32(value, addr); - -#if DEBUG_DENALI - printk(KERN_INFO "wrote: 0x%x -> 0x%x\n", value, - (uint32_t)((uint32_t)addr & 0x1fff)); -#endif -} - /* Certain operations for the denali NAND controller use * an indexed mode to read/write data. The operation is * performed by writing the address value of the command @@ -155,15 +136,15 @@ static void denali_write32(uint32_t value, void *addr) static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data) { - denali_write32(address, denali->flash_mem); - denali_write32(data, denali->flash_mem + 0x10); + iowrite32(address, denali->flash_mem); + iowrite32(data, denali->flash_mem + 0x10); } /* Perform an indexed read of the device */ static void index_addr_read_data(struct denali_nand_info *denali, uint32_t address, uint32_t *pdata) { - denali_write32(address, denali->flash_mem); + iowrite32(address, denali->flash_mem); *pdata = ioread32(denali->flash_mem + 0x10); } @@ -188,18 +169,11 @@ static void read_status(struct denali_nand_info *denali) /* initialize the data buffer to store status */ reset_buf(denali); - /* initiate a device status read */ - cmd = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, cmd | COMMAND_CYCLE, 0x70); - denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); - - /* update buffer with status value */ - write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10)); - -#if DEBUG_DENALI - printk(KERN_INFO "device reporting status value of 0x%2x\n", - denali->buf.buf[0]); -#endif + cmd = ioread32(denali->flash_reg + WRITE_PROTECT); + if (cmd) + write_byte_to_buf(denali, NAND_STATUS_WP); + else + write_byte_to_buf(denali, 0); } /* resets a specific device connected to the core */ @@ -213,12 +187,12 @@ static void reset_bank(struct denali_nand_info *denali) clear_interrupts(denali); bank = device_reset_banks[denali->flash_bank]; - denali_write32(bank, denali->flash_reg + DEVICE_RESET); + iowrite32(bank, denali->flash_reg + DEVICE_RESET); irq_status = wait_for_irq(denali, irq_mask); if (irq_status & operation_timeout[denali->flash_bank]) - printk(KERN_ERR "reset bank failed.\n"); + dev_err(&denali->dev->dev, "reset bank failed.\n"); } /* Reset the flash controller */ @@ -226,28 +200,28 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali) { uint32_t i; - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) - denali_write32(reset_complete[i] | operation_timeout[i], + iowrite32(reset_complete[i] | operation_timeout[i], 
denali->flash_reg + intr_status_addresses[i]); for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { - denali_write32(device_reset_banks[i], + iowrite32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET); while (!(ioread32(denali->flash_reg + - intr_status_addresses[i]) & + intr_status_addresses[i]) & (reset_complete[i] | operation_timeout[i]))) - ; + cpu_relax(); if (ioread32(denali->flash_reg + intr_status_addresses[i]) & operation_timeout[i]) - nand_dbg_print(NAND_DBG_WARN, + dev_dbg(&denali->dev->dev, "NAND Reset operation timed out on bank %d\n", i); } for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) - denali_write32(reset_complete[i] | operation_timeout[i], + iowrite32(reset_complete[i] | operation_timeout[i], denali->flash_reg + intr_status_addresses[i]); return PASS; @@ -280,7 +254,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali, uint16_t acc_clks; uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); en_lo = CEIL_DIV(Trp[mode], CLK_X); @@ -317,7 +291,7 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali, acc_clks++; if ((data_invalid - acc_clks * CLK_X) < 2) - nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n", + dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n", __FILE__, __LINE__); addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); @@ -345,14 +319,14 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali, (ioread32(denali->flash_reg + DEVICE_ID) == 0x88)) acc_clks = 6; - denali_write32(acc_clks, denali->flash_reg + ACC_CLKS); - denali_write32(re_2_we, denali->flash_reg + RE_2_WE); - denali_write32(re_2_re, denali->flash_reg + RE_2_RE); - denali_write32(we_2_re, denali->flash_reg + WE_2_RE); - denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA); - denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); - denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); - denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); + iowrite32(acc_clks, denali->flash_reg + ACC_CLKS); + iowrite32(re_2_we, denali->flash_reg + RE_2_WE); + iowrite32(re_2_re, denali->flash_reg + RE_2_RE); + iowrite32(we_2_re, denali->flash_reg + WE_2_RE); + iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA); + iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); + iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); + iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); } /* queries the NAND device to see what ONFI modes it supports. 
*/ @@ -387,13 +361,13 @@ static void get_samsung_nand_para(struct denali_nand_info *denali, { if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ /* Set timing register values according to datasheet */ - denali_write32(5, denali->flash_reg + ACC_CLKS); - denali_write32(20, denali->flash_reg + RE_2_WE); - denali_write32(12, denali->flash_reg + WE_2_RE); - denali_write32(14, denali->flash_reg + ADDR_2_DATA); - denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT); - denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); - denali_write32(2, denali->flash_reg + CS_SETUP_CNT); + iowrite32(5, denali->flash_reg + ACC_CLKS); + iowrite32(20, denali->flash_reg + RE_2_WE); + iowrite32(12, denali->flash_reg + WE_2_RE); + iowrite32(14, denali->flash_reg + ADDR_2_DATA); + iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT); + iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT); + iowrite32(2, denali->flash_reg + CS_SETUP_CNT); } } @@ -405,15 +379,15 @@ static void get_toshiba_nand_para(struct denali_nand_info *denali) /* spare area size for some kind of Toshiba NAND device */ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { - denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - denali_write32(tmp, + iowrite32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); #if SUPPORT_15BITECC - denali_write32(15, denali->flash_reg + ECC_CORRECTION); + iowrite32(15, denali->flash_reg + ECC_CORRECTION); #elif SUPPORT_8BITECC - denali_write32(8, denali->flash_reg + ECC_CORRECTION); + iowrite32(8, denali->flash_reg + ECC_CORRECTION); #endif } } @@ -426,26 +400,26 @@ static void get_hynix_nand_para(struct denali_nand_info *denali, switch (device_id) { case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ - denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); - denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); - denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); + iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK); + iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); + iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED); spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED); - denali_write32(main_size, + iowrite32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); - denali_write32(spare_size, + iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); - denali_write32(0, denali->flash_reg + DEVICE_WIDTH); + iowrite32(0, denali->flash_reg + DEVICE_WIDTH); #if SUPPORT_15BITECC - denali_write32(15, denali->flash_reg + ECC_CORRECTION); + iowrite32(15, denali->flash_reg + ECC_CORRECTION); #elif SUPPORT_8BITECC - denali_write32(8, denali->flash_reg + ECC_CORRECTION); + iowrite32(8, denali->flash_reg + ECC_CORRECTION); #endif break; default: - nand_dbg_print(NAND_DBG_WARN, + dev_warn(&denali->dev->dev, "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." "Will use default parameter values instead.\n", device_id); @@ -453,7 +427,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali, } /* determines how many NAND chips are connected to the controller. Note for - Intel CE4100 devices we don't support more than one device. 
+ * Intel CE4100 devices we don't support more than one device. */ static void find_valid_banks(struct denali_nand_info *denali) { @@ -467,7 +441,7 @@ static void find_valid_banks(struct denali_nand_info *denali) index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); - nand_dbg_print(NAND_DBG_DEBUG, + dev_dbg(&denali->dev->dev, "Return 1st ID for bank[%d]: %x\n", i, id[i]); if (i == 0) { @@ -487,12 +461,13 @@ static void find_valid_banks(struct denali_nand_info *denali) * Multichip support is not enabled. */ if (denali->total_used_banks != 1) { - printk(KERN_ERR "Sorry, Intel CE4100 only supports " + dev_err(&denali->dev->dev, + "Sorry, Intel CE4100 only supports " "a single NAND device.\n"); BUG(); } } - nand_dbg_print(NAND_DBG_DEBUG, + dev_dbg(&denali->dev->dev, "denali->total_used_banks: %d\n", denali->total_used_banks); } @@ -526,8 +501,9 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) uint32_t id_bytes[5], addr; uint8_t i, maf_id, device_id; - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); + dev_dbg(&denali->dev->dev, + "%s, Line %d, Function: %s\n", + __FILE__, __LINE__, __func__); /* Use read id method to get device ID and other * params. For some NAND chips, controller can't @@ -554,12 +530,14 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) get_hynix_nand_para(denali, device_id); } - nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" - "acc_clks: %d, re_2_we: %d, we_2_re: %d," - "addr_2_data: %d, rdwr_en_lo_cnt: %d, " + dev_info(&denali->dev->dev, + "Dump timing register values:" + "acc_clks: %d, re_2_we: %d, re_2_re: %d\n" + "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n" "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", ioread32(denali->flash_reg + ACC_CLKS), ioread32(denali->flash_reg + RE_2_WE), + ioread32(denali->flash_reg + RE_2_RE), ioread32(denali->flash_reg + WE_2_RE), ioread32(denali->flash_reg + ADDR_2_DATA), ioread32(denali->flash_reg + RDWR_EN_LO_CNT), @@ -582,17 +560,17 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) static void denali_set_intr_modes(struct denali_nand_info *denali, uint16_t INT_ENABLE) { - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", + dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (INT_ENABLE) - denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE); + iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE); else - denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE); + iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE); } /* validation function to verify that the controlling software is making - a valid request + * a valid request */ static inline bool is_flash_bank_valid(int flash_bank) { @@ -609,10 +587,10 @@ static void denali_irq_init(struct denali_nand_info *denali) int_mask = DENALI_IRQ_ALL; /* Clear all status bits */ - denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0); - denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1); - denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2); - denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3); + iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0); + iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1); + iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2); + iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3); denali_irq_enable(denali, int_mask); } @@ -626,10 +604,10 @@ static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali) static void 
denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask) { - denali_write32(int_mask, denali->flash_reg + INTR_EN0); - denali_write32(int_mask, denali->flash_reg + INTR_EN1); - denali_write32(int_mask, denali->flash_reg + INTR_EN2); - denali_write32(int_mask, denali->flash_reg + INTR_EN3); + iowrite32(int_mask, denali->flash_reg + INTR_EN0); + iowrite32(int_mask, denali->flash_reg + INTR_EN1); + iowrite32(int_mask, denali->flash_reg + INTR_EN2); + iowrite32(int_mask, denali->flash_reg + INTR_EN3); } /* This function only returns when an interrupt that this driver cares about @@ -648,7 +626,7 @@ static inline void clear_interrupt(struct denali_nand_info *denali, intr_status_reg = intr_status_addresses[denali->flash_bank]; - denali_write32(irq_mask, denali->flash_reg + intr_status_reg); + iowrite32(irq_mask, denali->flash_reg + intr_status_reg); } static void clear_interrupts(struct denali_nand_info *denali) @@ -657,11 +635,7 @@ static void clear_interrupts(struct denali_nand_info *denali) spin_lock_irq(&denali->irq_lock); status = read_interrupt_status(denali); - -#if DEBUG_DENALI - denali->irq_debug_array[denali->idx++] = 0x30000000 | status; - denali->idx %= 32; -#endif + clear_interrupt(denali, status); denali->irq_status = 0x0; spin_unlock_irq(&denali->irq_lock); @@ -676,17 +650,6 @@ static uint32_t read_interrupt_status(struct denali_nand_info *denali) return ioread32(denali->flash_reg + intr_status_reg); } -#if DEBUG_DENALI -static void print_irq_log(struct denali_nand_info *denali) -{ - int i = 0; - - printk(KERN_INFO "ISR debug log index = %X\n", denali->idx); - for (i = 0; i < 32; i++) - printk(KERN_INFO "%08X: %08X\n", i, denali->irq_debug_array[i]); -} -#endif - /* This is the interrupt service routine. It handles all interrupts * sent to this device. Note that on CE4100, this is a shared * interrupt. 
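The denali.c interrupt hunks above, and the comment on the interrupt service routine that closes this hunk, note that the IRQ line is shared on CE4100: the handler first asks the controller (denali_irq_detected()) whether it actually raised the line, acknowledges it with clear_interrupt() only if so, and otherwise leaves the interrupt for the other devices sharing the line. A minimal standalone sketch of that pattern follows; it is not part of the patch, the stub irqreturn_t and the fake_* names are stand-ins for the kernel definitions and the driver's helpers, and the IRQ_NONE return for the not-ours case is the usual shared-handler convention rather than text quoted from the diff.

#include <stdio.h>
#include <stdint.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;   /* stand-in for <linux/irqreturn.h> */

struct fake_denali { uint32_t pending; };             /* hypothetical device state */

static irqreturn_t fake_denali_isr(int irq, void *dev_id)
{
	struct fake_denali *d = dev_id;
	uint32_t status = d->pending;          /* would be denali_irq_detected() */

	(void)irq;
	if (!status)
		return IRQ_NONE;               /* line raised by another device sharing it */

	d->pending = 0;                        /* would be clear_interrupt() + completion */
	return IRQ_HANDLED;
}

int main(void)
{
	struct fake_denali d = { .pending = 0x10 };

	printf("first call:  %s\n", fake_denali_isr(5, &d) == IRQ_HANDLED ? "handled" : "none");
	printf("second call: %s\n", fake_denali_isr(5, &d) == IRQ_HANDLED ? "handled" : "none");
	return 0;
}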
@@ -707,13 +670,6 @@ static irqreturn_t denali_isr(int irq, void *dev_id) * the interrupt, since this is a shared interrupt */ irq_status = denali_irq_detected(denali); if (irq_status != 0) { -#if DEBUG_DENALI - denali->irq_debug_array[denali->idx++] = - 0x10000000 | irq_status; - denali->idx %= 32; - - printk(KERN_INFO "IRQ status = 0x%04x\n", irq_status); -#endif /* handle interrupt */ /* first acknowledge it */ clear_interrupt(denali, irq_status); @@ -739,41 +695,20 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) unsigned long timeout = msecs_to_jiffies(1000); do { -#if DEBUG_DENALI - printk(KERN_INFO "waiting for 0x%x\n", irq_mask); -#endif comp_res = wait_for_completion_timeout(&denali->complete, timeout); spin_lock_irq(&denali->irq_lock); intr_status = denali->irq_status; -#if DEBUG_DENALI - denali->irq_debug_array[denali->idx++] = - 0x20000000 | (irq_mask << 16) | intr_status; - denali->idx %= 32; -#endif - if (intr_status & irq_mask) { denali->irq_status &= ~irq_mask; spin_unlock_irq(&denali->irq_lock); -#if DEBUG_DENALI - if (retry) - printk(KERN_INFO "status on retry = 0x%x\n", - intr_status); -#endif /* our interrupt was detected */ break; } else { /* these are not the interrupts you are looking for - * need to wait again */ spin_unlock_irq(&denali->irq_lock); -#if DEBUG_DENALI - print_irq_log(denali); - printk(KERN_INFO "received irq nobody cared:" - " irq_status = 0x%x, irq_mask = 0x%x," - " timeout = %ld\n", intr_status, - irq_mask, comp_res); -#endif retry = true; } } while (comp_res != 0); @@ -789,7 +724,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) } /* This helper function setups the registers for ECC and whether or not - the spare area will be transfered. */ + * the spare area will be transfered. */ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, bool transfer_spare) { @@ -800,13 +735,13 @@ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; /* Enable spare area/ECC per user's request. */ - denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); - denali_write32(transfer_spare_flag, + iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); + iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); } /* sends a pipeline command operation to the controller. See the Denali NAND - controller's user guide for more information (section 4.2.3.6). + * controller's user guide for more information (section 4.2.3.6). 
*/ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en, @@ -827,16 +762,6 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, setup_ecc_for_xfer(denali, ecc_en, transfer_spare); -#if DEBUG_DENALI - spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = - 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | - (access_type << 4); - denali->idx %= 32; - spin_unlock_irq(&denali->irq_lock); -#endif - - /* clear interrupts */ clear_interrupts(denali); @@ -844,14 +769,14 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, if (op == DENALI_WRITE && access_type != SPARE_ACCESS) { cmd = MODE_01 | addr; - denali_write32(cmd, denali->flash_mem); + iowrite32(cmd, denali->flash_mem); } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) { /* read spare area */ cmd = MODE_10 | addr; index_addr(denali, (uint32_t)cmd, access_type); cmd = MODE_01 | addr; - denali_write32(cmd, denali->flash_mem); + iowrite32(cmd, denali->flash_mem); } else if (op == DENALI_READ) { /* setup page read request for access type */ cmd = MODE_10 | addr; @@ -863,7 +788,7 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, */ if (access_type == SPARE_ACCESS) { cmd = MODE_01 | addr; - denali_write32(cmd, denali->flash_mem); + iowrite32(cmd, denali->flash_mem); } else { index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count); @@ -875,13 +800,14 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, irq_status = wait_for_irq(denali, irq_mask); if (irq_status == 0) { - printk(KERN_ERR "cmd, page, addr on timeout " - "(0x%x, 0x%x, 0x%x)\n", cmd, - denali->page, addr); + dev_err(&denali->dev->dev, + "cmd, page, addr on timeout " + "(0x%x, 0x%x, 0x%x)\n", + cmd, denali->page, addr); status = FAIL; } else { cmd = MODE_01 | addr; - denali_write32(cmd, denali->flash_mem); + iowrite32(cmd, denali->flash_mem); } } } @@ -902,7 +828,7 @@ static int write_data_to_flash_mem(struct denali_nand_info *denali, /* write the data to the flash memory */ buf32 = (uint32_t *)buf; for (i = 0; i < len / 4; i++) - denali_write32(*buf32++, denali->flash_mem + 0x10); + iowrite32(*buf32++, denali->flash_mem + 0x10); return i*4; /* intent is to return the number of bytes read */ } @@ -945,24 +871,15 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) DENALI_WRITE) == PASS) { write_data_to_flash_mem(denali, buf, mtd->oobsize); -#if DEBUG_DENALI - spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = - 0x80000000 | mtd->oobsize; - denali->idx %= 32; - spin_unlock_irq(&denali->irq_lock); -#endif - - /* wait for operation to complete */ irq_status = wait_for_irq(denali, irq_mask); if (irq_status == 0) { - printk(KERN_ERR "OOB write failed\n"); + dev_err(&denali->dev->dev, "OOB write failed\n"); status = -EIO; } } else { - printk(KERN_ERR "unable to send pipeline command\n"); + dev_err(&denali->dev->dev, "unable to send pipeline command\n"); status = -EIO; } return status; @@ -977,9 +894,6 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) denali->page = page; -#if DEBUG_DENALI - printk(KERN_INFO "read_oob %d\n", page); -#endif if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, DENALI_READ) == PASS) { read_data_from_flash_mem(denali, buf, mtd->oobsize); @@ -990,7 +904,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) irq_status = wait_for_irq(denali, irq_mask); if (irq_status == 0) - printk(KERN_ERR "page on OOB timeout 
%d\n", + dev_err(&denali->dev->dev, "page on OOB timeout %d\n", denali->page); /* We set the device back to MAIN_ACCESS here as I observed @@ -1002,14 +916,6 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) addr = BANK(denali->flash_bank) | denali->page; cmd = MODE_10 | addr; index_addr(denali, (uint32_t)cmd, MAIN_ACCESS); - -#if DEBUG_DENALI - spin_lock_irq(&denali->irq_lock); - denali->irq_debug_array[denali->idx++] = - 0x60000000 | mtd->oobsize; - denali->idx %= 32; - spin_unlock_irq(&denali->irq_lock); -#endif } } @@ -1029,12 +935,12 @@ bool is_erased(uint8_t *buf, int len) #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) -#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO)) -#define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) +#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE)) +#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, - uint8_t *oobbuf, uint32_t irq_status) + uint32_t irq_status) { bool check_erased_page = false; @@ -1043,6 +949,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, uint32_t err_address = 0, err_correction_info = 0; uint32_t err_byte = 0, err_sector = 0, err_device = 0; uint32_t err_correction_value = 0; + denali_set_intr_modes(denali, false); do { err_address = ioread32(denali->flash_reg + @@ -1050,7 +957,6 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, err_sector = ECC_SECTOR(err_address); err_byte = ECC_BYTE(err_address); - err_correction_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO); err_correction_value = @@ -1058,20 +964,23 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, err_device = ECC_ERR_DEVICE(err_correction_info); if (ECC_ERROR_CORRECTABLE(err_correction_info)) { - /* offset in our buffer is computed as: - sector number * sector size + offset in - sector - */ - int offset = err_sector * ECC_SECTOR_SIZE + - err_byte; - if (offset < denali->mtd.writesize) { + /* If err_byte is larger than ECC_SECTOR_SIZE, + * means error happend in OOB, so we ignore + * it. It's no need for us to correct it + * err_device is represented the NAND error + * bits are happened in if there are more + * than one NAND connected. 
+ * */ + if (err_byte < ECC_SECTOR_SIZE) { + int offset; + offset = (err_sector * + ECC_SECTOR_SIZE + + err_byte) * + denali->devnum + + err_device; /* correct the ECC error */ buf[offset] ^= err_correction_value; denali->mtd.ecc_stats.corrected++; - } else { - /* bummer, couldn't correct the error */ - printk(KERN_ERR "ECC offset invalid\n"); - denali->mtd.ecc_stats.failed++; } } else { /* if the error is not correctable, need to @@ -1080,14 +989,16 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, * */ check_erased_page = true; } - -#if DEBUG_DENALI - printk(KERN_INFO "Detected ECC error in page %d:" - " err_addr = 0x%08x, info to fix is" - " 0x%08x\n", denali->page, err_address, - err_correction_info); -#endif } while (!ECC_LAST_ERR(err_correction_info)); + /* Once handle all ecc errors, controller will triger + * a ECC_TRANSACTION_DONE interrupt, so here just wait + * for a while for this interrupt + * */ + while (!(read_interrupt_status(denali) & + INTR_STATUS0__ECC_TRANSACTION_DONE)) + cpu_relax(); + clear_interrupts(denali); + denali_set_intr_modes(denali, true); } return check_erased_page; } @@ -1100,7 +1011,7 @@ static void denali_enable_dma(struct denali_nand_info *denali, bool en) if (en) reg_val = DMA_ENABLE__FLAG; - denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); + iowrite32(reg_val, denali->flash_reg + DMA_ENABLE); ioread32(denali->flash_reg + DMA_ENABLE); } @@ -1129,7 +1040,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op) } /* writes a page. user specifies type, and this function handles the - configuration details. */ + * configuration details. */ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, bool raw_xfer) { @@ -1171,8 +1082,9 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, irq_status = wait_for_irq(denali, irq_mask); if (irq_status == 0) { - printk(KERN_ERR "timeout on write_page" - " (type = %d)\n", raw_xfer); + dev_err(&denali->dev->dev, + "timeout on write_page (type = %d)\n", + raw_xfer); denali->status = (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL : PASS; @@ -1185,8 +1097,9 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, /* NAND core entry points */ /* this is the callback that the NAND core calls to write a page. Since - writing a page with ECC or without is similar, all the work is done - by write_page above. */ + * writing a page with ECC or without is similar, all the work is done + * by write_page above. + * */ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf) { @@ -1196,8 +1109,8 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, } /* This is the callback that the NAND core calls to write a page without ECC. - raw access is similiar to ECC page writes, so all the work is done in the - write_page() function above. + * raw access is similiar to ECC page writes, so all the work is done in the + * write_page() function above. 
*/ static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf) @@ -1236,6 +1149,13 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, INTR_STATUS0__ECC_ERR; bool check_erased_page = false; + if (page != denali->page) { + dev_err(&denali->dev->dev, "IN %s: page %d is not" + " equal to denali->page %d, investigate!!", + __func__, page, denali->page); + BUG(); + } + setup_ecc_for_xfer(denali, true, false); denali_enable_dma(denali, true); @@ -1251,7 +1171,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, memcpy(buf, denali->buf.buf, mtd->writesize); - check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); + check_erased_page = handle_ecc(denali, buf, irq_status); denali_enable_dma(denali, false); if (check_erased_page) { @@ -1280,6 +1200,13 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, uint32_t irq_status = 0; uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; + if (page != denali->page) { + dev_err(&denali->dev->dev, "IN %s: page %d is not" + " equal to denali->page %d, investigate!!", + __func__, page, denali->page); + BUG(); + } + setup_ecc_for_xfer(denali, false, true); denali_enable_dma(denali, true); @@ -1309,18 +1236,13 @@ static uint8_t denali_read_byte(struct mtd_info *mtd) if (denali->buf.head < denali->buf.tail) result = denali->buf.buf[denali->buf.head++]; -#if DEBUG_DENALI - printk(KERN_INFO "read byte -> 0x%02x\n", result); -#endif return result; } static void denali_select_chip(struct mtd_info *mtd, int chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); -#if DEBUG_DENALI - printk(KERN_INFO "denali select chip %d\n", chip); -#endif + spin_lock_irq(&denali->irq_lock); denali->flash_bank = chip; spin_unlock_irq(&denali->irq_lock); @@ -1332,9 +1254,6 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) int status = denali->status; denali->status = 0; -#if DEBUG_DENALI - printk(KERN_INFO "waitfunc %d\n", status); -#endif return status; } @@ -1344,9 +1263,6 @@ static void denali_erase(struct mtd_info *mtd, int page) uint32_t cmd = 0x0, irq_status = 0; -#if DEBUG_DENALI - printk(KERN_INFO "erase page: %d\n", page); -#endif /* clear interrupts */ clear_interrupts(denali); @@ -1369,9 +1285,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, uint32_t addr, id; int i; -#if DEBUG_DENALI - printk(KERN_INFO "cmdfunc: 0x%x %d %d\n", cmd, col, page); -#endif switch (cmd) { case NAND_CMD_PAGEPROG: break; @@ -1415,7 +1328,9 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, uint8_t *ecc_code) { - printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n"); + struct denali_nand_info *denali = mtd_to_denali(mtd); + dev_err(&denali->dev->dev, + "denali_ecc_calculate called unexpectedly\n"); BUG(); return -EIO; } @@ -1423,14 +1338,18 @@ static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, uint8_t *read_ecc, uint8_t *calc_ecc) { - printk(KERN_ERR "denali_ecc_correct called unexpectedly\n"); + struct denali_nand_info *denali = mtd_to_denali(mtd); + dev_err(&denali->dev->dev, + "denali_ecc_correct called unexpectedly\n"); BUG(); return -EIO; } static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) { - printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n"); + struct denali_nand_info *denali = 
mtd_to_denali(mtd); + dev_err(&denali->dev->dev, + "denali_ecc_hwctl called unexpectedly\n"); BUG(); } /* end NAND core entry points */ @@ -1445,18 +1364,18 @@ static void denali_hw_init(struct denali_nand_info *denali) * */ denali->bbtskipbytes = ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES); - denali_irq_init(denali); denali_nand_reset(denali); - denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED); - denali_write32(CHIP_EN_DONT_CARE__FLAG, + iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); + iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE); - denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES); - denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); + iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); /* Should set value for these registers when init */ - denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); - denali_write32(1, denali->flash_reg + ECC_ENABLE); + iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); + iowrite32(1, denali->flash_reg + ECC_ENABLE); + denali_nand_timing_set(denali); + denali_irq_init(denali); } /* Althogh controller spec said SLC ECC is forceb to be 4bit, @@ -1526,9 +1445,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) unsigned long csr_len, mem_len; struct denali_nand_info *denali; - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); - denali = kzalloc(sizeof(*denali), GFP_KERNEL); if (!denali) return -ENOMEM; @@ -1536,7 +1452,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) ret = pci_enable_device(dev); if (ret) { printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); - goto failed_enable; + goto failed_alloc_memery; } if (id->driver_data == INTEL_CE4100) { @@ -1547,7 +1463,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) printk(KERN_ERR "Intel CE4100 only supports" " ONFI timing mode 1 or below\n"); ret = -EINVAL; - goto failed_enable; + goto failed_enable_dev; } denali->platform = INTEL_CE4100; mem_base = pci_resource_start(dev, 0); @@ -1557,17 +1473,12 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } else { denali->platform = INTEL_MRST; csr_base = pci_resource_start(dev, 0); - csr_len = pci_resource_start(dev, 0); + csr_len = pci_resource_len(dev, 0); mem_base = pci_resource_start(dev, 1); mem_len = pci_resource_len(dev, 1); if (!mem_len) { mem_base = csr_base + csr_len; mem_len = csr_len; - nand_dbg_print(NAND_DBG_WARN, - "Spectra: No second" - " BAR for PCI device;" - " assuming %08Lx\n", - (uint64_t)csr_base); } } @@ -1576,7 +1487,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (ret) { printk(KERN_ERR "Spectra: no usable DMA configuration\n"); - goto failed_enable; + goto failed_enable_dev; } denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, @@ -1584,50 +1495,44 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) { - printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); - goto failed_enable; + dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); + goto failed_enable_dev; } pci_set_master(dev); denali->dev = dev; + denali->mtd.dev.parent = &dev->dev; ret = pci_request_regions(dev, DENALI_NAND_NAME); if (ret) { printk(KERN_ERR "Spectra: Unable to request memory regions\n"); - goto failed_req_csr; + goto failed_dma_map; 
} denali->flash_reg = ioremap_nocache(csr_base, csr_len); if (!denali->flash_reg) { printk(KERN_ERR "Spectra: Unable to remap memory region\n"); ret = -ENOMEM; - goto failed_remap_csr; + goto failed_req_regions; } - nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n", - (uint64_t)csr_base, denali->flash_reg, csr_len); denali->flash_mem = ioremap_nocache(mem_base, mem_len); if (!denali->flash_mem) { printk(KERN_ERR "Spectra: ioremap_nocache failed!"); - iounmap(denali->flash_reg); ret = -ENOMEM; - goto failed_remap_csr; + goto failed_remap_reg; } - nand_dbg_print(NAND_DBG_WARN, - "Spectra: Remapped flash base address: " - "0x%p, len: %ld\n", - denali->flash_mem, csr_len); - denali_hw_init(denali); denali_drv_init(denali); - nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq); + /* denali_isr register is done after all the hardware + * initilization is finished*/ if (request_irq(dev->irq, denali_isr, IRQF_SHARED, DENALI_NAND_NAME, denali)) { printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); ret = -ENODEV; - goto failed_request_irq; + goto failed_remap_mem; } /* now that our ISR is registered, we can enable interrupts */ @@ -1635,21 +1540,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_drvdata(dev, denali); - denali_nand_timing_set(denali); - - nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" - "acc_clks: %d, re_2_we: %d, we_2_re: %d," - "addr_2_data: %d, rdwr_en_lo_cnt: %d, " - "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", - ioread32(denali->flash_reg + ACC_CLKS), - ioread32(denali->flash_reg + RE_2_WE), - ioread32(denali->flash_reg + WE_2_RE), - ioread32(denali->flash_reg + ADDR_2_DATA), - ioread32(denali->flash_reg + RDWR_EN_LO_CNT), - ioread32(denali->flash_reg + RDWR_EN_HI_CNT), - ioread32(denali->flash_reg + CS_SETUP_CNT)); - - denali->mtd.name = "Denali NAND"; + denali->mtd.name = "denali-nand"; denali->mtd.owner = THIS_MODULE; denali->mtd.priv = &denali->nand; @@ -1664,7 +1555,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) * with the nand subsystem */ if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) { ret = -ENXIO; - goto failed_nand; + goto failed_req_irq; } /* MTD supported page sizes vary by kernel. 
We validate our @@ -1674,7 +1565,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) ret = -ENODEV; printk(KERN_ERR "Spectra: device size not supported by this " "version of MTD."); - goto failed_nand; + goto failed_req_irq; } /* support for multi nand @@ -1719,17 +1610,17 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* if MLC OOB size is large enough, use 15bit ECC*/ denali->nand.ecc.layout = &nand_15bit_oob; denali->nand.ecc.bytes = ECC_15BITS; - denali_write32(15, denali->flash_reg + ECC_CORRECTION); + iowrite32(15, denali->flash_reg + ECC_CORRECTION); } else if (denali->mtd.oobsize < (denali->bbtskipbytes + ECC_8BITS * (denali->mtd.writesize / ECC_SECTOR_SIZE))) { printk(KERN_ERR "Your NAND chip OOB is not large enough to" " contain 8bit ECC correction codes"); - goto failed_nand; + goto failed_req_irq; } else { denali->nand.ecc.layout = &nand_8bit_oob; denali->nand.ecc.bytes = ECC_8BITS; - denali_write32(8, denali->flash_reg + ECC_CORRECTION); + iowrite32(8, denali->flash_reg + ECC_CORRECTION); } denali->nand.ecc.bytes *= denali->devnum; @@ -1769,28 +1660,31 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (nand_scan_tail(&denali->mtd)) { ret = -ENXIO; - goto failed_nand; + goto failed_req_irq; } ret = add_mtd_device(&denali->mtd); if (ret) { - printk(KERN_ERR "Spectra: Failed to register" - " MTD device: %d\n", ret); - goto failed_nand; + dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", + ret); + goto failed_req_irq; } return 0; - failed_nand: +failed_req_irq: denali_irq_cleanup(dev->irq, denali); - failed_request_irq: - iounmap(denali->flash_reg); +failed_remap_mem: iounmap(denali->flash_mem); - failed_remap_csr: +failed_remap_reg: + iounmap(denali->flash_reg); +failed_req_regions: pci_release_regions(dev); - failed_req_csr: +failed_dma_map: pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, PCI_DMA_BIDIRECTIONAL); - failed_enable: +failed_enable_dev: + pci_disable_device(dev); +failed_alloc_memery: kfree(denali); return ret; } @@ -1800,9 +1694,6 @@ static void denali_pci_remove(struct pci_dev *dev) { struct denali_nand_info *denali = pci_get_drvdata(dev); - nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); - nand_release(&denali->mtd); del_mtd_device(&denali->mtd); diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index b680474e633..3918bcb1561 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -635,24 +635,6 @@ #define CLK_X 5 #define CLK_MULTI 4 -/* ffsport.h */ -#define VERBOSE 1 - -#define NAND_DBG_WARN 1 -#define NAND_DBG_DEBUG 2 -#define NAND_DBG_TRACE 3 - -#ifdef VERBOSE -#define nand_dbg_print(level, args...) \ - do { \ - if (level <= nand_debug_level) \ - printk(KERN_ALERT args); \ - } while (0) -#else -#define nand_dbg_print(level, args...) 
-#endif - - /* spectraswconfig.h */ #define CMD_DMA 0 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a3c7473dd40..d551ddd9537 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, */ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && id_data[0] == NAND_MFR_SAMSUNG && + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && id_data[5] != 0x00) { /* Calc pagesize */ mtd->writesize = 2048 << (extid & 0x03); @@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; /* Set the bad block position */ - if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO || - (*maf_id == NAND_MFR_SAMSUNG && - mtd->writesize == 512) || - *maf_id == NAND_MFR_AMD)) - chip->badblockpos = NAND_SMALL_BADBLOCK_POS; - else + if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) chip->badblockpos = NAND_LARGE_BADBLOCK_POS; - + else + chip->badblockpos = NAND_SMALL_BADBLOCK_POS; /* Get chip options, preserve non chip based options */ chip->options &= ~NAND_CHIPOPTIONS_MSK; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index a04b89105b6..c65f19074bc 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -112,7 +112,7 @@ struct nand_flash_dev nand_flash_ids[] = { {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, /* 32 Gigabit */ - {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS16}, + {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS}, /* * Renesas AND 1 Gigabit. Those chips do not support extended id and diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 90e143e5ad3..317aff428e4 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) struct resource *res; int err = 0; + if (pdata->chip.nr_chips < 1) { + dev_err(&pdev->dev, "invalid number of chips specified\n"); + return -EINVAL; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index e02fa4f0e3c..4d89f378020 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { #define tAR_NDTR1(r) (((r) >> 0) & 0xf) /* convert nano-seconds to nand flash controller clock cycles */ -#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) +#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) /* convert nand flash controller clock cycles to nano-seconds */ #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug index 2246f154e2f..61f6e5e4045 100644 --- a/drivers/mtd/ubi/Kconfig.debug +++ b/drivers/mtd/ubi/Kconfig.debug @@ -6,7 +6,7 @@ config MTD_UBI_DEBUG depends on SYSFS depends on MTD_UBI select DEBUG_FS - select KALLSYMS_ALL + select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL help This option enables UBI debugging. 
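The pxa3xx_nand.c hunk above changes ns2cycle() so the nanosecond-to-clock-cycle conversion no longer subtracts an extra cycle after the integer division has already rounded down, which could otherwise produce a delay shorter than the one requested. A small standalone sketch of the before/after arithmetic, not part of the patch; the 156 MHz controller clock and 20 ns timing value are assumed example figures, only the two macro forms come from the hunk itself.

#include <stdio.h>

/* old and new forms of the ns2cycle() macro as they appear in the hunk */
#define ns2cycle_old(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
#define ns2cycle_new(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

int main(void)
{
	unsigned long clk = 156000000;	/* assumed controller clock, Hz */
	int ns = 20;			/* assumed timing requirement, ns */

	/* 20 ns at 156 MHz is about 3.1 cycles: the old macro yields 2, the new one 3 */
	printf("old: %d cycles, new: %d cycles\n",
	       ns2cycle_old(ns, clk), ns2cycle_new(ns, clk));
	return 0;
}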
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 4dfa6b90c21..3d2d1a69e9a 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi, goto out_free; } - re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); - if (!re) { + re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); + if (!re1) { err = -ENOMEM; ubi_close_volume(desc); goto out_free; } - re->remove = 1; - re->desc = desc; - list_add(&re->list, &rename_list); + re1->remove = 1; + re1->desc = desc; + list_add(&re1->list, &rename_list); dbg_msg("will remove volume %d, name \"%s\"", - re->desc->vol->vol_id, re->desc->vol->name); + re1->desc->vol->vol_id, re1->desc->vol->name); } mutex_lock(&ubi->device_mutex); diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 372a15ac999..69b52e9c948 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, case UBI_COMPAT_DELETE: ubi_msg("\"delete\" compatible internal volume %d:%d" " found, will remove it", vol_id, lnum); - err = add_to_list(si, pnum, ec, &si->corr); + err = add_to_list(si, pnum, ec, &si->erase); if (err) return err; return 0; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index ee7b1d8fbb9..97a435672ea 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; - if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { + if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || + in_wl_tree(e, &ubi->erroneous)) { spin_unlock(&ubi->wl_lock); return 0; } diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c754d88e5ec..a045559c81c 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -633,7 +633,8 @@ struct vortex_private { open:1, medialock:1, must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ - large_frames:1; /* accept large frames */ + large_frames:1, /* accept large frames */ + handling_irq:1; /* private in_irq indicator */ int drv_flags; u16 status_enable; u16 intr_enable; @@ -646,7 +647,7 @@ struct vortex_private { u16 io_size; /* Size of PCI region (for release_region) */ /* Serialises access to hardware other than MII and variables below. - * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ + * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ spinlock_t lock; spinlock_t mii_lock; /* Serialises access to MII */ @@ -2133,6 +2134,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->name, vp->cur_tx); } + /* + * We can't allow a recursion from our interrupt handler back into the + * tx routine, as they take the same spin lock, and that causes + * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in + * a bit + */ + if (vp->handling_irq) + return NETDEV_TX_BUSY; + if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { if (vortex_debug > 0) pr_warning("%s: BUG! 
Tx Ring full, refusing to send buffer.\n", @@ -2335,11 +2345,13 @@ boomerang_interrupt(int irq, void *dev_id) ioaddr = vp->ioaddr; + /* * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout * and boomerang_start_xmit */ spin_lock(&vp->lock); + vp->handling_irq = 1; status = ioread16(ioaddr + EL3_STATUS); @@ -2447,6 +2459,7 @@ boomerang_interrupt(int irq, void *dev_id) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); handler_exit: + vp->handling_irq = 0; spin_unlock(&vp->lock); return IRQ_HANDLED; } @@ -2971,7 +2984,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int err; struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; pci_power_t state = 0; if(VORTEX_PCI(vp)) @@ -2981,9 +2993,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if(state != 0) pci_set_power_state(VORTEX_PCI(vp), PCI_D0); - spin_lock_irqsave(&vp->lock, flags); err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); - spin_unlock_irqrestore(&vp->lock, flags); if(state != 0) pci_set_power_state(VORTEX_PCI(vp), state); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5a6895320b4..2cc81a54cbf 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -928,6 +928,16 @@ config SMC91X The module will be called smc91x. If you want to compile it as a module, say M here and read <file:Documentation/kbuild/modules.txt>. +config PXA168_ETH + tristate "Marvell pxa168 ethernet support" + depends on CPU_PXA168 + select PHYLIB + help + This driver supports the pxa168 Ethernet ports. + + To compile this driver as a module, choose M here. The module + will be called pxa168_eth. + config NET_NETX tristate "NetX Ethernet support" select MII diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 56e8c27f77c..3e8f150c4b1 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_SMSC911X) += smsc911x.o +obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 53af9c93e75..0c2d96ed561 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -20,8 +20,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.52.53-3" -#define DRV_MODULE_RELDATE "2010/18/04" +#define DRV_MODULE_VERSION "1.52.53-4" +#define DRV_MODULE_RELDATE "2010/16/08" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index b4ec2b02a46..f8c3f08e4ce 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp) val |= aeu_gpio_mask; REG_WR(bp, offset, val); } + bp->port.need_hw_lock = 1; break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + bp->port.need_hw_lock = 1; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: /* add SPIO 5 to group 0 */ { u32 reg_addr = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : @@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp) REG_WR(bp, reg_addr, val); } break; - + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + bp->port.need_hw_lock = 1; + break; default: break; } diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 631a6242b01..75bfc3a9d95 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -15,7 +15,7 @@ config CAIF_TTY config CAIF_SPI_SLAVE tristate "CAIF SPI transport driver for slave interface" - depends on CAIF + depends on CAIF && HAS_DMA default n ---help--- The CAIF Link layer SPI Protocol driver for Slave SPI interface. diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index a4a0d2b6eb1..d3d4a57e245 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); icr = er32(ICR); - /* Install any alternate MAC address into RAR0 */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - return ret_val; + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; - e1000e_set_laa_state_82571(hw, true); + e1000e_set_laa_state_82571(hw, true); + } /* Reinitialize the 82571 serdes link state machine */ if (hw->phy.media_type == e1000_media_type_internal_serdes) @@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) { s32 ret_val = 0; - /* - * If there's an alternate MAC address place it in RAR0 - * so that it will override the Si installed default perm - * address. - */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - goto out; + if (hw->mac.type == e1000_82571) { + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + } ret_val = e1000_read_mac_addr_generic(hw); @@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = { | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1, .pba = 20, .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 307a72f483e..93b3bedae8d 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -621,6 +621,7 @@ #define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 #define NVM_ID_LED_SETTINGS 0x0004 #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 @@ -643,6 +644,9 @@ /* Mask bits for fields in Word 0x1a of the NVM */ #define NVM_WORD1A_ASPM_MASK 0x000C +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index df4a2792293..0fd4eb5ac5f 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ALEN]; + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + goto out; + + /* Check for LOM (vs. 
NIC) or one of two valid mezzanine cards */ + if (!((nvm_data & NVM_COMPAT_LOM) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) + goto out; + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &nvm_alt_mac_addr_offset); if (ret_val) { diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 99a929964e3..1846623c6ae 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0105" +#define DRV_VERSION "EHEA_0106" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 @@ -400,6 +400,7 @@ struct ehea_port_res { u32 poll_counter; struct net_lro_mgr lro_mgr; struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; + int sq_restart_flag; }; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 897719b49f9..a333b42111b 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev, return processed; } +#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull + +static void reset_sq_restart_flag(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { + struct ehea_port_res *pr = &port->port_res[i]; + pr->sq_restart_flag = 0; + } +} + +static void check_sqs(struct ehea_port *port) +{ + struct ehea_swqe *swqe; + int swqe_index; + int i, k; + + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { + struct ehea_port_res *pr = &port->port_res[i]; + k = 0; + swqe = ehea_get_swqe(pr->qp, &swqe_index); + memset(swqe, 0, SWQE_HEADER_SIZE); + atomic_dec(&pr->swqe_avail); + + swqe->tx_control |= EHEA_SWQE_PURGE; + swqe->wr_id = SWQE_RESTART_CHECK; + swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; + swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; + swqe->immediate_data_length = 80; + + ehea_post_swqe(pr->qp, swqe); + + while (pr->sq_restart_flag == 0) { + msleep(5); + if (++k == 100) { + ehea_error("HW/SW queues out of sync"); + ehea_schedule_port_reset(pr->port); + return; + } + } + } + + return; +} + + static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) { struct sk_buff *skb; @@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) cqe_counter++; rmb(); + + if (cqe->wr_id == SWQE_RESTART_CHECK) { + pr->sq_restart_flag = 1; + swqe_av++; + break; + } + if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { ehea_error("Bad send completion status=0x%04X", cqe->status); @@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port) int k = 0; while (atomic_read(&pr->swqe_avail) < swqe_max) { msleep(5); - if (++k == 20) + if (++k == 20) { + ehea_error("WARNING: sq not flushed completely"); break; + } } } } @@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work) port_napi_disable(port); mutex_unlock(&port->port_lock); } + reset_sq_restart_flag(port); } /* Unregister old memory region */ @@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work) mutex_lock(&port->port_lock); port_napi_enable(port); ret = ehea_restart_qps(dev); + check_sqs(port); if (!ret) netif_wake_queue(dev); mutex_unlock(&port->port_lock); diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c index 3995fafc1e0..8c6c1e2a875 100644 --- a/drivers/net/ibm_newemac/debug.c +++ b/drivers/net/ibm_newemac/debug.c @@ -238,7 +238,7 @@ void emac_dbg_dump_all(void) } #if 
defined(CONFIG_MAGIC_SYSRQ) -static void emac_sysrq_handler(int key, struct tty_struct *tty) +static void emac_sysrq_handler(int key) { emac_dbg_dump_all(); } diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 2602852cc55..4734c939ad0 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) struct ibmveth_adapter *adapter = netdev_priv(dev); struct vio_dev *viodev = adapter->vdev; int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; - int i; + int i, rc; + int need_restart = 0; if (new_mtu < IBMVETH_MAX_MTU) return -EINVAL; @@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) /* Deactivate all the buffer pools so that the next loop can activate only the buffer pools necessary to hold the new MTU */ - for (i = 0; i < IbmVethNumBufferPools; i++) - if (adapter->rx_buff_pool[i].active) { - ibmveth_free_buffer_pool(adapter, - &adapter->rx_buff_pool[i]); - adapter->rx_buff_pool[i].active = 0; - } + if (netif_running(adapter->netdev)) { + need_restart = 1; + adapter->pool_config = 1; + ibmveth_close(adapter->netdev); + adapter->pool_config = 0; + } /* Look for an active buffer pool that can hold the new MTU */ for(i = 0; i<IbmVethNumBufferPools; i++) { adapter->rx_buff_pool[i].active = 1; if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { - if (netif_running(adapter->netdev)) { - adapter->pool_config = 1; - ibmveth_close(adapter->netdev); - adapter->pool_config = 0; - dev->mtu = new_mtu; - vio_cmo_set_dev_desired(viodev, - ibmveth_get_desired_dma - (viodev)); - return ibmveth_open(adapter->netdev); - } dev->mtu = new_mtu; vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma (viodev)); + if (need_restart) { + return ibmveth_open(adapter->netdev); + } return 0; } } + + if (need_restart && (rc = ibmveth_open(adapter->netdev))) + return rc; + return -EINVAL; } diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index c7b624711f5..bdf2149e529 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev) disable_irq(lp->tx_irq); disable_irq(lp->rx_irq); - ll_temac_rx_irq(lp->tx_irq, lp); - ll_temac_tx_irq(lp->rx_irq, lp); + ll_temac_rx_irq(lp->tx_irq, ndev); + ll_temac_tx_irq(lp->rx_irq, ndev); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index ffa1b9ce1cc..6dca3574e35 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 73 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.73" +#define _NETXEN_NIC_LINUX_SUBVERSION 74 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.74" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index c865dda2adf..cabae7bb1fc 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, netxen_ctx_msg msg = 0; struct list_head *head; - spin_lock(&rds_ring->lock); - producer = rds_ring->producer; head = &rds_ring->free_list; @@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, NETXEN_RCV_PRODUCER_OFFSET), msg); } } - - 
spin_unlock(&rds_ring->lock); } static void diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index fd86e18604e..73d31459223 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) struct netxen_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; - memset(stats, 0, sizeof(*stats)); - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes; @@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) #ifdef CONFIG_NET_POLL_CONTROLLER static void netxen_nic_poll_controller(struct net_device *netdev) { + int ring; + struct nx_host_sds_ring *sds_ring; struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + disable_irq(adapter->irq); - netxen_intr(adapter->irq, adapter); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_intr(adapter->irq, sds_ring); + } enable_irq(adapter->irq); } #endif diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index c3edfe4c265..49279b0ee52 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1637,6 +1637,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), + PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 4eb6f986703..f5819526b5e 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -808,6 +808,7 @@ xirc2ps_config(struct pcmcia_device * link) } link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + link->io_lines = 10; if (local->modem) { int pass; @@ -839,7 +840,6 @@ xirc2ps_config(struct pcmcia_device * link) } printk(KNOT_XIRC "no ports available\n"); } else { - link->io_lines = 10; link->resource[0]->end = 16; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { link->resource[0]->start = ioaddr; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c0761197c07..16ddc77313c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->interface = interface; + phydev->state = PHY_READY; + /* Do initial configuration here, now that * we have certain key parameters * (dev_flags and interface) */ diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c new file mode 100644 index 00000000000..85eddda276b --- /dev/null +++ b/drivers/net/pxa168_eth.c @@ -0,0 +1,1666 @@ +/* + * PXA168 ethernet driver. + * Most of the code is derived from mv643xx ethernet driver. + * + * Copyright (C) 2010 Marvell International Ltd. 
+ * Sachin Sanap <ssanap@marvell.com> + * Philip Rakity <prakity@marvell.com> + * Mark Brown <markb@marvell.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/etherdevice.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/workqueue.h> +#include <linux/clk.h> +#include <linux/phy.h> +#include <linux/io.h> +#include <linux/types.h> +#include <asm/pgtable.h> +#include <asm/system.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <asm/cacheflush.h> +#include <linux/pxa168_eth.h> + +#define DRIVER_NAME "pxa168-eth" +#define DRIVER_VERSION "0.3" + +/* + * Registers + */ + +#define PHY_ADDRESS 0x0000 +#define SMI 0x0010 +#define PORT_CONFIG 0x0400 +#define PORT_CONFIG_EXT 0x0408 +#define PORT_COMMAND 0x0410 +#define PORT_STATUS 0x0418 +#define HTPR 0x0428 +#define SDMA_CONFIG 0x0440 +#define SDMA_CMD 0x0448 +#define INT_CAUSE 0x0450 +#define INT_W_CLEAR 0x0454 +#define INT_MASK 0x0458 +#define ETH_F_RX_DESC_0 0x0480 +#define ETH_C_RX_DESC_0 0x04A0 +#define ETH_C_TX_DESC_1 0x04E4 + +/* smi register */ +#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ +#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ +#define SMI_OP_W (0 << 26) /* Write operation */ +#define SMI_OP_R (1 << 26) /* Read operation */ + +#define PHY_WAIT_ITERATIONS 10 + +#define PXA168_ETH_PHY_ADDR_DEFAULT 0 +/* RX & TX descriptor command */ +#define BUF_OWNED_BY_DMA (1 << 31) + +/* RX descriptor status */ +#define RX_EN_INT (1 << 23) +#define RX_FIRST_DESC (1 << 17) +#define RX_LAST_DESC (1 << 16) +#define RX_ERROR (1 << 15) + +/* TX descriptor command */ +#define TX_EN_INT (1 << 23) +#define TX_GEN_CRC (1 << 22) +#define TX_ZERO_PADDING (1 << 18) +#define TX_FIRST_DESC (1 << 17) +#define TX_LAST_DESC (1 << 16) +#define TX_ERROR (1 << 15) + +/* SDMA_CMD */ +#define SDMA_CMD_AT (1 << 31) +#define SDMA_CMD_TXDL (1 << 24) +#define SDMA_CMD_TXDH (1 << 23) +#define SDMA_CMD_AR (1 << 15) +#define SDMA_CMD_ERD (1 << 7) + +/* Bit definitions of the Port Config Reg */ +#define PCR_HS (1 << 12) +#define PCR_EN (1 << 7) +#define PCR_PM (1 << 0) + +/* Bit definitions of the Port Config Extend Reg */ +#define PCXR_2BSM (1 << 28) +#define PCXR_DSCP_EN (1 << 21) +#define PCXR_MFL_1518 (0 << 14) +#define PCXR_MFL_1536 (1 << 14) +#define PCXR_MFL_2048 (2 << 14) +#define PCXR_MFL_64K (3 << 14) +#define PCXR_FLP (1 << 11) +#define PCXR_PRIO_TX_OFF 3 +#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) + +/* Bit definitions of the SDMA Config Reg */ +#define SDCR_BSZ_OFF 12 +#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) +#define SDCR_BSZ4 (2 << 
SDCR_BSZ_OFF) +#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) +#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) +#define SDCR_BLMR (1 << 6) +#define SDCR_BLMT (1 << 7) +#define SDCR_RIFB (1 << 9) +#define SDCR_RC_OFF 2 +#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) + +/* + * Bit definitions of the Interrupt Cause Reg + * and Interrupt MASK Reg is the same + */ +#define ICR_RXBUF (1 << 0) +#define ICR_TXBUF_H (1 << 2) +#define ICR_TXBUF_L (1 << 3) +#define ICR_TXEND_H (1 << 6) +#define ICR_TXEND_L (1 << 7) +#define ICR_RXERR (1 << 8) +#define ICR_TXERR_H (1 << 10) +#define ICR_TXERR_L (1 << 11) +#define ICR_TX_UDR (1 << 13) +#define ICR_MII_CH (1 << 28) + +#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ + ICR_TXERR_H | ICR_TXERR_L |\ + ICR_TXEND_H | ICR_TXEND_L |\ + ICR_RXBUF | ICR_RXERR | ICR_MII_CH) + +#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ + +#define NUM_RX_DESCS 64 +#define NUM_TX_DESCS 64 + +#define HASH_ADD 0 +#define HASH_DELETE 1 +#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ +#define HOP_NUMBER 12 + +/* Bit definitions for Port status */ +#define PORT_SPEED_100 (1 << 0) +#define FULL_DUPLEX (1 << 1) +#define FLOW_CONTROL_ENABLED (1 << 2) +#define LINK_UP (1 << 3) + +/* Bit definitions for work to be done */ +#define WORK_LINK (1 << 0) +#define WORK_TX_DONE (1 << 1) + +/* + * Misc definitions. + */ +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 reserved; + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; + +struct pxa168_eth_private { + int port_num; /* User Ethernet port number */ + + int rx_resource_err; /* Rx ring resource error flag */ + + /* Next available and first returning Rx resource */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Next available and first returning Tx resource */ + int tx_curr_desc_q, tx_used_desc_q; + + struct rx_desc *p_rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; + + struct tx_desc *p_tx_desc_area; + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + struct sk_buff **tx_skb; + + struct work_struct tx_timeout_task; + + struct net_device *dev; + struct napi_struct napi; + u8 work_todo; + int skb_size; + + struct net_device_stats stats; + /* Size of Tx Ring per queue */ + int tx_ring_size; + /* Number of tx descriptors in use */ + int tx_desc_count; + /* Size of Rx Ring per queue */ + int rx_ring_size; + /* Number of rx descriptors in use */ + int rx_desc_count; + + /* + * Used in case RX Ring is empty, which can occur when + * system does not have resources (skb's) + */ + struct timer_list timeout; + struct mii_bus *smi_bus; + struct phy_device *phy; + + /* clock */ + struct clk *clk; + struct pxa168_eth_platform_data *pd; + /* + * Ethernet controller base address. 
+ */ + void __iomem *base; + + /* Pointer to the hardware address filter table */ + void *htpr; + dma_addr_t htpr_dma; +}; + +struct addr_table_entry { + __le32 lo; + __le32 hi; +}; + +/* Bit fields of a Hash Table Entry */ +enum hash_table_entry { + HASH_ENTRY_VALID = 1, + SKIP = 2, + HASH_ENTRY_RECEIVE_DISCARD = 4, + HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 +}; + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_init_hw(struct pxa168_eth_private *pep); +static void eth_port_reset(struct net_device *dev); +static void eth_port_start(struct net_device *dev); +static int pxa168_eth_open(struct net_device *dev); +static int pxa168_eth_stop(struct net_device *dev); +static int ethernet_phy_setup(struct net_device *dev); + +static inline u32 rdl(struct pxa168_eth_private *pep, int offset) +{ + return readl(pep->base + offset); +} + +static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) +{ + writel(data, pep->base + offset); +} + +static void abort_dma(struct pxa168_eth_private *pep) +{ + int delay; + int max_retries = 40; + + do { + wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); + udelay(100); + + delay = 10; + while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) + && delay-- > 0) { + udelay(10); + } + } while (max_retries-- > 0 && delay <= 0); + + if (max_retries <= 0) + printk(KERN_ERR "%s : DMA Stuck\n", __func__); +} + +static int ethernet_phy_get(struct pxa168_eth_private *pep) +{ + unsigned int reg_data; + + reg_data = rdl(pep, PHY_ADDRESS); + + return (reg_data >> (5 * pep->port_num)) & 0x1f; +} + +static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) +{ + u32 reg_data; + int addr_shift = 5 * pep->port_num; + + reg_data = rdl(pep, PHY_ADDRESS); + reg_data &= ~(0x1f << addr_shift); + reg_data |= (phy_addr & 0x1f) << addr_shift; + wrl(pep, PHY_ADDRESS, reg_data); +} + +static void ethernet_phy_reset(struct pxa168_eth_private *pep) +{ + int data; + + data = phy_read(pep->phy, MII_BMCR); + if (data < 0) + return; + + data |= BMCR_RESET; + if (phy_write(pep->phy, MII_BMCR, data) < 0) + return; + + do { + data = phy_read(pep->phy, MII_BMCR); + } while (data >= 0 && data & BMCR_RESET); +} + +static void rxq_refill(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct sk_buff *skb; + struct rx_desc *p_used_rx_desc; + int used_rx_desc; + + while (pep->rx_desc_count < pep->rx_ring_size) { + int size; + + skb = dev_alloc_skb(pep->skb_size); + if (!skb) + break; + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + pep->rx_desc_count++; + /* Get 'used' Rx descriptor */ + used_rx_desc = pep->rx_used_desc_q; + p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; + size = skb->end - skb->data; + p_used_rx_desc->buf_ptr = dma_map_single(NULL, + skb->data, + size, + DMA_FROM_DEVICE); + p_used_rx_desc->buf_size = size; + pep->rx_skb[used_rx_desc] = skb; + + /* Return the descriptor to DMA ownership */ + wmb(); + p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; + wmb(); + + /* Move the used descriptor pointer to the next descriptor */ + pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; + + /* Any Rx return cancels the Rx resource error status */ + pep->rx_resource_err = 0; + + skb_reserve(skb, ETH_HW_IP_ALIGN); + } + + /* + * If RX ring is empty of SKB, set a timer to try allocating + * again at a later time. 
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function calculates the hash of the address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated entry.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+ /* Make a copy of MAC address since we are going to perform bit
+ * operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add or delete an entry in the address table.
+ * Inputs
+ * pep - ETHERNET.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used in case of deleting an entry which is
+ * part of a chain in the hash table. We can't just delete the entry since
+ * that will break the chain. We need to defragment the tables from time
+ * to time.
+ * rd - 0 Discard packet upon match.
+ * - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full + */ +static int add_del_hash_entry(struct pxa168_eth_private *pep, + unsigned char *mac_addr, + u32 rd, u32 skip, int del) +{ + struct addr_table_entry *entry, *start; + u32 new_high; + u32 new_low; + u32 i; + + new_low = (((mac_addr[1] >> 4) & 0xf) << 15) + | (((mac_addr[1] >> 0) & 0xf) << 11) + | (((mac_addr[0] >> 4) & 0xf) << 7) + | (((mac_addr[0] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 4) & 0x1) << 31) + | (((mac_addr[3] >> 0) & 0xf) << 27) + | (((mac_addr[2] >> 4) & 0xf) << 23) + | (((mac_addr[2] >> 0) & 0xf) << 19) + | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) + | HASH_ENTRY_VALID; + + new_high = (((mac_addr[5] >> 4) & 0xf) << 15) + | (((mac_addr[5] >> 0) & 0xf) << 11) + | (((mac_addr[4] >> 4) & 0xf) << 7) + | (((mac_addr[4] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 5) & 0x7) << 0); + + /* + * Pick the appropriate table, start scanning for free/reusable + * entries at the index obtained by hashing the specified MAC address + */ + start = (struct addr_table_entry *)(pep->htpr); + entry = start + hash_function(mac_addr); + for (i = 0; i < HOP_NUMBER; i++) { + if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { + break; + } else { + /* if same address put in same position */ + if (((le32_to_cpu(entry->lo) & 0xfffffff8) == + (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) == new_high)) { + break; + } + } + if (entry == start + 0x7ff) + entry = start; + else + entry++; + } + + if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) != new_high) && del) + return 0; + + if (i == HOP_NUMBER) { + if (!del) { + printk(KERN_INFO "%s: table section is full, need to " + "move to 16kB implementation?\n", + __FILE__); + return -ENOSPC; + } else + return 0; + } + + /* + * Update the selected entry + */ + if (del) { + entry->hi = 0; + entry->lo = 0; + } else { + entry->hi = cpu_to_le32(new_high); + entry->lo = cpu_to_le32(new_low); + } + + return 0; +} + +/* + * ---------------------------------------------------------------------------- + * Create an addressTable entry from MAC address info + * found in the specifed net_device struct + * + * Input : pointer to ethernet interface network device structure + * Output : N/A + */ +static void update_hash_table_mac_address(struct pxa168_eth_private *pep, + unsigned char *oaddr, + unsigned char *addr) +{ + /* Delete old entry */ + if (oaddr) + add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); + /* Add new entry */ + add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); +} + +static int init_hash_table(struct pxa168_eth_private *pep) +{ + /* + * Hardware expects CPU to build a hash table based on a predefined + * hash function and populate it based on hardware address. The + * location of the hash table is identified by 32-bit pointer stored + * in HTPR internal register. Two possible sizes exists for the hash + * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB + * (16kB of DRAM required (4 x 4 kB banks)).We currently only support + * 1/2kB. + */ + /* TODO: Add support for 8kB hash table and alternative hash + * function.Driver can dynamically switch to them if the 1/2kB hash + * table is full. 
+ */ + if (pep->htpr == NULL) { + pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); + if (pep->htpr == NULL) + return -ENOMEM; + } + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + wrl(pep, HTPR, pep->htpr_dma); + return 0; +} + +static void pxa168_eth_set_rx_mode(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 val; + + val = rdl(pep, PORT_CONFIG); + if (dev->flags & IFF_PROMISC) + val |= PCR_PM; + else + val &= ~PCR_PM; + wrl(pep, PORT_CONFIG, val); + + /* + * Remove the old list of MAC address and add dev->addr + * and multicast address. + */ + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + netdev_for_each_mc_addr(ha, dev) + update_hash_table_mac_address(pep, NULL, ha->addr); +} + +static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned char oldMac[ETH_ALEN]; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EINVAL; + memcpy(oldMac, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + netif_addr_lock_bh(dev); + update_hash_table_mac_address(pep, oldMac, dev->dev_addr); + netif_addr_unlock_bh(dev); + return 0; +} + +static void eth_port_start(struct net_device *dev) +{ + unsigned int val = 0; + struct pxa168_eth_private *pep = netdev_priv(dev); + int tx_curr_desc, rx_curr_desc; + + /* Perform PHY reset, if there is a PHY. */ + if (pep->phy != NULL) { + struct ethtool_cmd cmd; + + pxa168_get_settings(pep->dev, &cmd); + ethernet_phy_reset(pep); + pxa168_set_settings(pep->dev, &cmd); + } + + /* Assignment of Tx CTRP of given queue */ + tx_curr_desc = pep->tx_curr_desc_q; + wrl(pep, ETH_C_TX_DESC_1, + (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); + + /* Assignment of Rx CRDP of given queue */ + rx_curr_desc = pep->rx_curr_desc_q; + wrl(pep, ETH_C_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + wrl(pep, ETH_F_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Enable all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, ALL_INTS); + + val = rdl(pep, PORT_CONFIG); + val |= PCR_EN; + wrl(pep, PORT_CONFIG, val); + + /* Start RX DMA engine */ + val = rdl(pep, SDMA_CMD); + val |= SDMA_CMD_ERD; + wrl(pep, SDMA_CMD, val); +} + +static void eth_port_reset(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int val = 0; + + /* Stop all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, 0); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Stop RX DMA */ + val = rdl(pep, SDMA_CMD); + val &= ~SDMA_CMD_ERD; /* abort dma command */ + + /* Abort any transmit and receive operations and put DMA + * in idle state. 
+ */ + abort_dma(pep); + + /* Disable port */ + val = rdl(pep, PORT_CONFIG); + val &= ~PCR_EN; + wrl(pep, PORT_CONFIG, val); +} + +/* + * txq_reclaim - Free the tx desc data for completed descriptors + * If force is non-zero, frees uncompleted descriptors as well + */ +static int txq_reclaim(struct net_device *dev, int force) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *desc; + u32 cmd_sts; + struct sk_buff *skb; + int tx_index; + dma_addr_t addr; + int count; + int released = 0; + + netif_tx_lock(dev); + + pep->work_todo &= ~WORK_TX_DONE; + while (pep->tx_desc_count > 0) { + tx_index = pep->tx_used_desc_q; + desc = &pep->p_tx_desc_area[tx_index]; + cmd_sts = desc->cmd_sts; + if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) { + if (released > 0) { + goto txq_reclaim_end; + } else { + released = -1; + goto txq_reclaim_end; + } + } + pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; + pep->tx_desc_count--; + addr = desc->buf_ptr; + count = desc->byte_cnt; + skb = pep->tx_skb[tx_index]; + if (skb) + pep->tx_skb[tx_index] = NULL; + + if (cmd_sts & TX_ERROR) { + if (net_ratelimit()) + printk(KERN_ERR "%s: Error in TX\n", dev->name); + dev->stats.tx_errors++; + } + dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); + if (skb) + dev_kfree_skb_irq(skb); + released++; + } +txq_reclaim_end: + netif_tx_unlock(dev); + return released; +} + +static void pxa168_eth_tx_timeout(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + printk(KERN_INFO "%s: TX timeout desc_count %d\n", + dev->name, pep->tx_desc_count); + + schedule_work(&pep->tx_timeout_task); +} + +static void pxa168_eth_tx_timeout_task(struct work_struct *work) +{ + struct pxa168_eth_private *pep = container_of(work, + struct pxa168_eth_private, + tx_timeout_task); + struct net_device *dev = pep->dev; + pxa168_eth_stop(dev); + pxa168_eth_open(dev); +} + +static int rxq_process(struct net_device *dev, int budget) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int received_packets = 0; + struct sk_buff *skb; + + while (budget-- > 0) { + int rx_next_curr_desc, rx_curr_desc, rx_used_desc; + struct rx_desc *rx_desc; + unsigned int cmd_sts; + + /* Do not process Rx ring in case of Rx ring resource error */ + if (pep->rx_resource_err) + break; + rx_curr_desc = pep->rx_curr_desc_q; + rx_used_desc = pep->rx_used_desc_q; + rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; + cmd_sts = rx_desc->cmd_sts; + rmb(); + if (cmd_sts & (BUF_OWNED_BY_DMA)) + break; + skb = pep->rx_skb[rx_curr_desc]; + pep->rx_skb[rx_curr_desc] = NULL; + + rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; + pep->rx_curr_desc_q = rx_next_curr_desc; + + /* Rx descriptors exhausted. */ + /* Set the Rx ring resource error flag */ + if (rx_next_curr_desc == rx_used_desc) + pep->rx_resource_err = 1; + pep->rx_desc_count--; + dma_unmap_single(NULL, rx_desc->buf_ptr, + rx_desc->buf_size, + DMA_FROM_DEVICE); + received_packets++; + /* + * Update statistics. + * Note byte count includes 4 byte CRC count + */ + stats->rx_packets++; + stats->rx_bytes += rx_desc->byte_cnt; + /* + * In case received a packet without first / last bits on OR + * the error summary bit is on, the packets needs to be droped. 
+ */ + if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) + || (cmd_sts & RX_ERROR)) { + + stats->rx_dropped++; + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + printk(KERN_ERR + "%s: Rx pkt on multiple desc\n", + dev->name); + } + if (cmd_sts & RX_ERROR) + stats->rx_errors++; + dev_kfree_skb_irq(skb); + } else { + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, rx_desc->byte_cnt - 4); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + dev->last_rx = jiffies; + } + /* Fill RX ring with skb's */ + rxq_refill(dev); + return received_packets; +} + +static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, + struct net_device *dev) +{ + u32 icr; + int ret = 0; + + icr = rdl(pep, INT_CAUSE); + if (icr == 0) + return IRQ_NONE; + + wrl(pep, INT_CAUSE, ~icr); + if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { + pep->work_todo |= WORK_TX_DONE; + ret = 1; + } + if (icr & ICR_RXBUF) + ret = 1; + if (icr & ICR_MII_CH) { + pep->work_todo |= WORK_LINK; + ret = 1; + } + return ret; +} + +static void handle_link_event(struct pxa168_eth_private *pep) +{ + struct net_device *dev = pep->dev; + u32 port_status; + int speed; + int duplex; + int fc; + + port_status = rdl(pep, PORT_STATUS); + if (!(port_status & LINK_UP)) { + if (netif_carrier_ok(dev)) { + printk(KERN_INFO "%s: link down\n", dev->name); + netif_carrier_off(dev); + txq_reclaim(dev, 1); + } + return; + } + if (port_status & PORT_SPEED_100) + speed = 100; + else + speed = 10; + + duplex = (port_status & FULL_DUPLEX) ? 1 : 0; + fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; + printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " + "flow control %sabled\n", dev->name, + speed, duplex ? "full" : "half", fc ? "en" : "dis"); + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); +} + +static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (unlikely(!pxa168_eth_collect_events(pep, dev))) + return IRQ_NONE; + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + napi_schedule(&pep->napi); + return IRQ_HANDLED; +} + +static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = pep->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + pep->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
+ */ + pep->skb_size += SKB_DMA_REALIGN; + +} + +static int set_port_config_ext(struct pxa168_eth_private *pep) +{ + int skb_size; + + pxa168_eth_recalc_skb_size(pep); + if (pep->skb_size <= 1518) + skb_size = PCXR_MFL_1518; + else if (pep->skb_size <= 1536) + skb_size = PCXR_MFL_1536; + else if (pep->skb_size <= 2048) + skb_size = PCXR_MFL_2048; + else + skb_size = PCXR_MFL_64K; + + /* Extended Port Configuration */ + wrl(pep, + PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + PCXR_DSCP_EN | /* Enable DSCP in IP */ + skb_size | PCXR_FLP | /* do not force link pass */ + PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ + + return 0; +} + +static int pxa168_init_hw(struct pxa168_eth_private *pep) +{ + int err = 0; + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + /* Abort any transmit and receive operations and put DMA + * in idle state. + */ + abort_dma(pep); + /* Initialize address hash table */ + err = init_hash_table(pep); + if (err) + return err; + /* SDMA configuration */ + wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ + SDCR_RIFB | /* Rx interrupt on frame */ + SDCR_BLMT | /* Little endian transmit */ + SDCR_BLMR | /* Little endian receive */ + SDCR_RC_MAX_RETRANS); /* Max retransmit count */ + /* Port Configuration */ + wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ + set_port_config_ext(pep); + + return err; +} + +static int rxq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct rx_desc *p_rx_desc; + int size = 0, i = 0; + int rx_desc_num = pep->rx_ring_size; + + /* Allocate RX skb rings */ + pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + GFP_KERNEL); + if (!pep->rx_skb) { + printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name); + return -ENOMEM; + } + /* Allocate RX ring */ + pep->rx_desc_count = 0; + size = pep->rx_ring_size * sizeof(struct rx_desc); + pep->rx_desc_area_size = size; + pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, GFP_KERNEL); + if (!pep->p_rx_desc_area) { + printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", + dev->name, size); + goto out; + } + memset((void *)pep->p_rx_desc_area, 0, size); + /* initialize the next_desc_ptr links in the Rx descriptors ring */ + p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; + for (i = 0; i < rx_desc_num; i++) { + p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); + } + /* Save Rx desc pointer to driver struct. */ + pep->rx_curr_desc_q = 0; + pep->rx_used_desc_q = 0; + pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); + return 0; +out: + kfree(pep->rx_skb); + return -ENOMEM; +} + +static void rxq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int curr; + + /* Free preallocated skb's on RX rings */ + for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { + if (pep->rx_skb[curr]) { + dev_kfree_skb(pep->rx_skb[curr]); + pep->rx_desc_count--; + } + } + if (pep->rx_desc_count) + printk(KERN_ERR + "Error in freeing Rx Ring. 
%d skb's still\n", + pep->rx_desc_count); + /* Free RX ring */ + if (pep->p_rx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, + pep->p_rx_desc_area, pep->rx_desc_dma); + kfree(pep->rx_skb); +} + +static int txq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *p_tx_desc; + int size = 0, i = 0; + int tx_desc_num = pep->tx_ring_size; + + pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + GFP_KERNEL); + if (!pep->tx_skb) { + printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name); + return -ENOMEM; + } + /* Allocate TX ring */ + pep->tx_desc_count = 0; + size = pep->tx_ring_size * sizeof(struct tx_desc); + pep->tx_desc_area_size = size; + pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, GFP_KERNEL); + if (!pep->p_tx_desc_area) { + printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", + dev->name, size); + goto out; + } + memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); + /* Initialize the next_desc_ptr links in the Tx descriptors ring */ + p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; + for (i = 0; i < tx_desc_num; i++) { + p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); + } + pep->tx_curr_desc_q = 0; + pep->tx_used_desc_q = 0; + pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); + return 0; +out: + kfree(pep->tx_skb); + return -ENOMEM; +} + +static void txq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + /* Free outstanding skb's on TX ring */ + txq_reclaim(dev, 1); + BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); + /* Free TX ring */ + if (pep->p_tx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, + pep->p_tx_desc_area, pep->tx_desc_dma); + kfree(pep->tx_skb); +} + +static int pxa168_eth_open(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = request_irq(dev->irq, pxa168_eth_int_handler, + IRQF_DISABLED, dev->name, dev); + if (err) { + dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); + return -EAGAIN; + } + pep->rx_resource_err = 0; + err = rxq_init(dev); + if (err != 0) + goto out_free_irq; + err = txq_init(dev); + if (err != 0) + goto out_free_rx_skb; + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + + /* Fill RX ring with skb's */ + rxq_refill(dev); + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + netif_carrier_off(dev); + eth_port_start(dev); + napi_enable(&pep->napi); + return 0; +out_free_rx_skb: + rxq_deinit(dev); +out_free_irq: + free_irq(dev->irq, dev); + return err; +} + +static int pxa168_eth_stop(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + eth_port_reset(dev); + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + napi_disable(&pep->napi); + del_timer_sync(&pep->timeout); + netif_carrier_off(dev); + free_irq(dev->irq, dev); + rxq_deinit(dev); + txq_deinit(dev); + + return 0; +} + +static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) +{ + int retval; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if ((mtu > 9500) || (mtu < 68)) + return -EINVAL; + + dev->mtu = mtu; + retval = set_port_config_ext(pep); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. 
+ * There is a possible danger that the open will not succeed, + * due to memory being full. + */ + pxa168_eth_stop(dev); + if (pxa168_eth_open(dev)) { + dev_printk(KERN_ERR, &dev->dev, + "fatal error on re-opening device after " + "MTU change\n"); + } + + return 0; +} + +static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) +{ + int tx_desc_curr; + + tx_desc_curr = pep->tx_curr_desc_q; + pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; + BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); + pep->tx_desc_count++; + + return tx_desc_curr; +} + +static int pxa168_rx_poll(struct napi_struct *napi, int budget) +{ + struct pxa168_eth_private *pep = + container_of(napi, struct pxa168_eth_private, napi); + struct net_device *dev = pep->dev; + int work_done = 0; + + if (unlikely(pep->work_todo & WORK_LINK)) { + pep->work_todo &= ~(WORK_LINK); + handle_link_event(pep); + } + /* + * We call txq_reclaim every time since in NAPI interupts are disabled + * and due to this we miss the TX_DONE interrupt,which is not updated in + * interrupt status register. + */ + txq_reclaim(dev, 0); + if (netif_queue_stopped(dev) + && pep->tx_ring_size - pep->tx_desc_count > 1) { + netif_wake_queue(dev); + } + work_done = rxq_process(dev, budget); + if (work_done < budget) { + napi_complete(napi); + wrl(pep, INT_MASK, ALL_INTS); + } + + return work_done; +} + +static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct tx_desc *desc; + int tx_index; + int length; + + tx_index = eth_alloc_tx_desc_index(pep); + desc = &pep->p_tx_desc_area[tx_index]; + length = skb->len; + pep->tx_skb[tx_index] = skb; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + wmb(); + desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | + TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; + wmb(); + wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); + + stats->tx_bytes += skb->len; + stats->tx_packets++; + dev->trans_start = jiffies; + if (pep->tx_ring_size - pep->tx_desc_count <= 1) { + /* We handled the current skb, but now we are out of space.*/ + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +static int smi_wait_ready(struct pxa168_eth_private *pep) +{ + int i = 0; + + /* wait for the SMI register to become available */ + for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { + if (i == PHY_WAIT_ITERATIONS) + return -ETIMEDOUT; + msleep(10); + } + + return 0; +} + +static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct pxa168_eth_private *pep = bus->priv; + int i = 0; + int val; + + if (smi_wait_ready(pep)) { + printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); + /* now wait for the data to be valid */ + for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { + if (i == PHY_WAIT_ITERATIONS) { + printk(KERN_WARNING + "pxa168_eth: SMI bus read not valid\n"); + return -ENODEV; + } + msleep(10); + } + + return val & 0xffff; +} + +static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + struct pxa168_eth_private *pep = bus->priv; + + if (smi_wait_ready(pep)) { + printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | + SMI_OP_W | (value & 0xffff)); + + if (smi_wait_ready(pep)) { + 
printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + if (pep->phy != NULL) + return phy_mii_ioctl(pep->phy, ifr, cmd); + + return -EOPNOTSUPP; +} + +static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) +{ + struct mii_bus *bus = pep->smi_bus; + struct phy_device *phydev; + int start; + int num; + int i; + + if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { + /* Scan entire range */ + start = ethernet_phy_get(pep); + num = 32; + } else { + /* Use phy addr specific to platform */ + start = phy_addr & 0x1f; + num = 1; + } + phydev = NULL; + for (i = 0; i < num; i++) { + int addr = (start + i) & 0x1f; + if (bus->phy_map[addr] == NULL) + mdiobus_scan(bus, addr); + + if (phydev == NULL) { + phydev = bus->phy_map[addr]; + if (phydev != NULL) + ethernet_phy_set_addr(pep, addr); + } + } + + return phydev; +} + +static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) +{ + struct phy_device *phy = pep->phy; + ethernet_phy_reset(pep); + + phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII); + + if (speed == 0) { + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + phy->supported &= PHY_BASIC_FEATURES; + phy->advertising = phy->supported | ADVERTISED_Autoneg; + } else { + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->speed = speed; + phy->duplex = duplex; + } + phy_start_aneg(phy); +} + +static int ethernet_phy_setup(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->pd->init) + pep->pd->init(); + pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); + if (pep->phy != NULL) + phy_init(pep, pep->pd->speed, pep->pd->duplex); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + return 0; +} + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = phy_read_status(pep->phy); + if (err == 0) + err = phy_ethtool_gset(pep->phy, cmd); + + return err; +} + +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + return phy_ethtool_sset(pep->phy, cmd); +} + +static void pxa168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strncpy(info->driver, DRIVER_NAME, 32); + strncpy(info->version, DRIVER_VERSION, 32); + strncpy(info->fw_version, "N/A", 32); + strncpy(info->bus_info, "N/A", 32); +} + +static u32 pxa168_get_link(struct net_device *dev) +{ + return !!netif_carrier_ok(dev); +} + +static const struct ethtool_ops pxa168_ethtool_ops = { + .get_settings = pxa168_get_settings, + .set_settings = pxa168_set_settings, + .get_drvinfo = pxa168_get_drvinfo, + .get_link = pxa168_get_link, +}; + +static const struct net_device_ops pxa168_eth_netdev_ops = { + .ndo_open = pxa168_eth_open, + .ndo_stop = pxa168_eth_stop, + .ndo_start_xmit = pxa168_eth_start_xmit, + .ndo_set_rx_mode = pxa168_eth_set_rx_mode, + .ndo_set_mac_address = pxa168_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = pxa168_eth_do_ioctl, + .ndo_change_mtu = pxa168_eth_change_mtu, + .ndo_tx_timeout = pxa168_eth_tx_timeout, +}; + +static int pxa168_eth_probe(struct platform_device *pdev) +{ + struct pxa168_eth_private *pep = NULL; + struct net_device *dev = NULL; + struct resource *res; + 
struct clk *clk; + int err; + + printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); + + clk = clk_get(&pdev->dev, "MFUCLK"); + if (IS_ERR(clk)) { + printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n", + DRIVER_NAME); + return -ENODEV; + } + clk_enable(clk); + + dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); + if (!dev) { + err = -ENOMEM; + goto err_clk; + } + + platform_set_drvdata(pdev, dev); + pep = netdev_priv(dev); + pep->dev = dev; + pep->clk = clk; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + err = -ENODEV; + goto err_netdev; + } + pep->base = ioremap(res->start, res->end - res->start + 1); + if (pep->base == NULL) { + err = -ENOMEM; + goto err_netdev; + } + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + dev->netdev_ops = &pxa168_eth_netdev_ops; + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops); + + INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); + + printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); + random_ether_addr(dev->dev_addr); + + pep->pd = pdev->dev.platform_data; + pep->rx_ring_size = NUM_RX_DESCS; + if (pep->pd->rx_queue_size) + pep->rx_ring_size = pep->pd->rx_queue_size; + + pep->tx_ring_size = NUM_TX_DESCS; + if (pep->pd->tx_queue_size) + pep->tx_ring_size = pep->pd->tx_queue_size; + + pep->port_num = pep->pd->port_number; + /* Hardware supports only 3 ports */ + BUG_ON(pep->port_num > 2); + netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); + + memset(&pep->timeout, 0, sizeof(struct timer_list)); + init_timer(&pep->timeout); + pep->timeout.function = rxq_refill_timer_wrapper; + pep->timeout.data = (unsigned long)pep; + + pep->smi_bus = mdiobus_alloc(); + if (pep->smi_bus == NULL) { + err = -ENOMEM; + goto err_base; + } + pep->smi_bus->priv = pep; + pep->smi_bus->name = "pxa168_eth smi"; + pep->smi_bus->read = pxa168_smi_read; + pep->smi_bus->write = pxa168_smi_write; + snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); + pep->smi_bus->parent = &pdev->dev; + pep->smi_bus->phy_mask = 0xffffffff; + err = mdiobus_register(pep->smi_bus); + if (err) + goto err_free_mdio; + + pxa168_init_hw(pep); + err = ethernet_phy_setup(dev); + if (err) + goto err_mdiobus; + SET_NETDEV_DEV(dev, &pdev->dev); + err = register_netdev(dev); + if (err) + goto err_mdiobus; + return 0; + +err_mdiobus: + mdiobus_unregister(pep->smi_bus); +err_free_mdio: + mdiobus_free(pep->smi_bus); +err_base: + iounmap(pep->base); +err_netdev: + free_netdev(dev); +err_clk: + clk_disable(clk); + clk_put(clk); + return err; +} + +static int pxa168_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->htpr) { + dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, + pep->htpr, pep->htpr_dma); + pep->htpr = NULL; + } + if (pep->clk) { + clk_disable(pep->clk); + clk_put(pep->clk); + pep->clk = NULL; + } + if (pep->phy != NULL) + phy_detach(pep->phy); + + iounmap(pep->base); + pep->base = NULL; + mdiobus_unregister(pep->smi_bus); + mdiobus_free(pep->smi_bus); + unregister_netdev(dev); + flush_scheduled_work(); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static void pxa168_eth_shutdown(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + eth_port_reset(dev); +} + +#ifdef CONFIG_PM +static int pxa168_eth_resume(struct platform_device 
*pdev) +{ + return -ENOSYS; +} + +static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +#else +#define pxa168_eth_resume NULL +#define pxa168_eth_suspend NULL +#endif + +static struct platform_driver pxa168_eth_driver = { + .probe = pxa168_eth_probe, + .remove = pxa168_eth_remove, + .shutdown = pxa168_eth_shutdown, + .resume = pxa168_eth_resume, + .suspend = pxa168_eth_suspend, + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init pxa168_init_module(void) +{ + return platform_driver_register(&pxa168_eth_driver); +} + +static void __exit pxa168_cleanup_module(void) +{ + platform_driver_unregister(&pxa168_eth_driver); +} + +module_init(pxa168_init_module); +module_exit(pxa168_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); +MODULE_ALIAS("platform:pxa168_eth"); diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index bf6d87adda4..66eea597202 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -1983,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; - memset(stats, 0, sizeof(*stats)); - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; @@ -2190,9 +2188,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget) #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev) { + int ring; + struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; + disable_irq(adapter->irq); - qlcnic_intr(adapter->irq, adapter); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_intr(adapter->irq, sds_ring); + } enable_irq(adapter->irq); } #endif diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 8d63f69b27d..5f89e83501f 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev) for (i = 0; i < qdev->rss_ring_count; i++) netif_napi_del(&qdev->rx_ring[i].napi); - ql_free_rx_buffers(qdev); - status = ql_adapter_reset(qdev); if (status) netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", qdev->func); + ql_free_rx_buffers(qdev); + return status; } diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index f5a9eb1df59..79fd02bc69f 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { static int sh_eth_drv_probe(struct platform_device *pdev) { - int ret, i, devno = 0; + int ret, devno = 0; struct resource *res; struct net_device *ndev = NULL; struct sh_eth_private *mdp; diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 08e7b6abacd..8ed30fa35d0 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -58,6 +58,7 @@ #define USB_PRODUCT_IPHONE 0x1290 #define USB_PRODUCT_IPHONE_3G 0x1292 #define USB_PRODUCT_IPHONE_3GS 0x1294 +#define USB_PRODUCT_IPHONE_4 0x1297 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, 
USB_PRODUCT_IPHONE_3GS, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index a105087af96..f9aa1bc0a94 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) /* Nothing to do for ADMtek BBP */ } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) - wiphy_debug(dev->wiphy, "unsupported bbp type %d\n", + wiphy_debug(dev->wiphy, "unsupported BBP type %d\n", priv->bbp_type); ADM8211_RESTORE(); @@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) break; } } else - wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type); + wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type); ADM8211_CSR_WRITE(SYNRF, 0); @@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev) retval = request_irq(priv->pdev->irq, adm8211_interrupt, IRQF_SHARED, "adm8211", dev); if (retval) { - wiphy_err(dev->wiphy, "failed to register irq handler\n"); + wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); goto fail; } @@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev, goto err_free_eeprom; } - wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n", + wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n", dev->wiphy->perm_addr, pdev->revision); return 0; diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index d5140a87f07..1128fa8c9ed 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv) exit: kfree(hwcfg); if (ret < 0) - wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n", + wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n", ret); return ret; @@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv) sizeof(struct mib_mac_addr)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (mac_addr) failed: %d\n", ret); + "at76_get_mib (MAC_ADDR) failed: %d\n", ret); goto exit; } @@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv) sizeof(struct mib_mac_wep)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (mac_wep) failed: %d\n", ret); + "at76_get_mib (MAC_WEP) failed: %d\n", ret); goto exit; } @@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) sizeof(struct mib_mac_mgmt)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (mac_mgmt) failed: %d\n", ret); + "at76_get_mib (MAC_MGMT) failed: %d\n", ret); goto exit; } @@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (mac) failed: %d\n", ret); + "at76_get_mib (MAC) failed: %d\n", ret); goto exit; } @@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv) ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (phy) failed: %d\n", ret); + "at76_get_mib (PHY) failed: %d\n", ret); goto exit; } @@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv) ret = 
at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (local) failed: %d\n", ret); + "at76_get_mib (LOCAL) failed: %d\n", ret); goto exit; } @@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv) sizeof(struct mib_mdomain)); if (ret < 0) { wiphy_err(priv->hw->wiphy, - "at76_get_mib (mdomain) failed: %d\n", ret); + "at76_get_mib (MDOMAIN) failed: %d\n", ret); goto exit; } @@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv) struct sk_buff *skb = priv->rx_skb; if (!priv->rx_urb) { - wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n", + wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n", __func__); return -EFAULT; } @@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret); if (ret == -EINVAL) wiphy_err(priv->hw->wiphy, - "-einval: tx urb %p hcpriv %p complete %p\n", + "-EINVAL: tx urb %p hcpriv %p complete %p\n", priv->tx_urb, priv->tx_urb->hcpriv, priv->tx_urb->complete); } @@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv, priv->mac80211_registered = 1; - wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n", + wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n", dev_name(&interface->dev), priv->mac_addr, priv->fw_version.major, priv->fw_version.minor, priv->fw_version.patch, priv->fw_version.build); diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index c67b05f3bcb..debfb0fbc7c 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar) { int i; - wiphy_debug(ar->hw->wiphy, "qos queue stats\n"); + wiphy_debug(ar->hw->wiphy, "QoS queue stats\n"); for (i = 0; i < __AR9170_NUM_TXQ; i++) wiphy_debug(ar->hw->wiphy, @@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { #ifdef AR9170_QUEUE_DEBUG wiphy_debug(ar->hw->wiphy, - "skip frame => da %pm != %pm\n", + "skip frame => DA %pM != %pM\n", mac, ieee80211_get_DA(hdr)); ar9170_print_txheader(ar, skb); #endif /* AR9170_QUEUE_DEBUG */ diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 0d5de2574dd..d77ce9906b6 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -48,6 +48,7 @@ #include <linux/netdevice.h> #include <linux/cache.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/ethtool.h> #include <linux/uaccess.h> #include <linux/slab.h> @@ -476,6 +477,26 @@ ath5k_pci_probe(struct pci_dev *pdev, int ret; u8 csz; + /* + * L0s needs to be disabled on all ath5k cards. + * + * For distributions shipping with CONFIG_PCIEASPM (this will be enabled + * by default in the future in 2.6.36) this will also mean both L1 and + * L0s will be disabled when a pre 1.1 PCIe device is detected. We do + * know L1 works correctly even for all ath5k pre 1.1 PCIe devices + * though but cannot currently undue the effect of a blacklist, for + * details you can read pcie_aspm_sanity_check() and see how it adjusts + * the device link capability. 
+ * + * It may be possible in the future to implement some PCI API to allow + * drivers to override blacklists for pre 1.1 PCIe but for now it is + * best to accept that both L0s and L1 will be disabled completely for + * distributions shipping with CONFIG_PCIEASPM rather than having this + * issue present. Motivation for adding this new API will be to help + * with power consumption for some of these devices. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "can't enable device\n"); @@ -1306,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, PCI_DMA_TODEVICE); rate = ieee80211_get_tx_rate(sc->hw, info); + if (!rate) { + ret = -EINVAL; + goto err_unmap; + } if (info->flags & IEEE80211_TX_CTL_NO_ACK) flags |= AR5K_TXDESC_NOACK; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b883b174385..057fb69ddf7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, length = block[it+1]; length &= 0xff; - if (length > 0 && spot >= 0 && spot+length < mdataSize) { + if (length > 0 && spot >= 0 && spot+length <= mdataSize) { ath_print(common, ATH_DBG_EEPROM, "Restore at %d: spot=%d " "offset=%d length=%d\n", diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 8750c558c22..0b09db0f8e7 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -62,7 +62,7 @@ #define SD_NO_CTL 0xE0 #define NO_CTL 0xff -#define CTL_MODE_M 7 +#define CTL_MODE_M 0xf #define CTL_11A 0 #define CTL_11B 1 #define CTL_11G 2 @@ -191,6 +191,7 @@ #define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1 #define AR9287_EEP_START_LOC 128 +#define AR9287_HTC_EEP_START_LOC 256 #define AR9287_NUM_2G_CAL_PIERS 3 #define AR9287_NUM_2G_CCK_TARGET_POWERS 3 #define AR9287_NUM_2G_20_TARGET_POWERS 3 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 4a52cf03808..dff2da77731 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -34,9 +34,14 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data; - int addr, eep_start_loc = AR9287_EEP_START_LOC; + int addr, eep_start_loc; eep_data = (u16 *)eep; + if (ah->hw_version.devid == 0x7015) + eep_start_loc = AR9287_HTC_EEP_START_LOC; + else + eep_start_loc = AR9287_EEP_START_LOC; + if (!ath9k_hw_use_flash(ah)) { ath_print(common, ATH_DBG_EEPROM, "Reading from EEPROM, not flash\n"); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 61c1bee3f26..17e7a9a367e 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -799,7 +799,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) } kfree(buf); - if (hif_dev->device_id == 0x7010) + if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015)) firm_offset = AR7010_FIRMWARE_TEXT; else firm_offset = AR9271_FIRMWARE_TEXT; @@ -901,6 +901,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, switch(hif_dev->device_id) { case 0x7010: + case 0x7015: case 0x9018: if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) 
hif_dev->fw_name = FIRMWARE_AR7010_1_1; @@ -912,11 +913,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, break; } - if (!hif_dev->fw_name) { - dev_err(&udev->dev, "Can't determine firmware !\n"); - goto err_htc_hw_alloc; - } - ret = ath9k_hif_usb_dev_init(hif_dev); if (ret) { ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 148b43317fd..2d4279191d7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -245,6 +245,7 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) switch(devid) { case 0x7010: + case 0x7015: case 0x9018: priv->htc->credits = 45; break; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index ebed9d1691a..7d09b4b17bb 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -366,7 +366,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, caps = WLAN_RC_HT_FLAG; if (sta->ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && + (conf_is_ht40(&priv->hw->conf))) caps |= WLAN_RC_40_FLAG; if (conf_is_ht40(&priv->hw->conf) && (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bd0b4acc3ec..2a6e45a293a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -78,18 +78,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = tx_info->control.sta; struct ath9k_htc_sta *ista; - struct ath9k_htc_vif *avp; struct ath9k_htc_tx_ctl tx_ctl; enum htc_endpoint_id epid; u16 qnum; __le16 fc; u8 *tx_fhdr; - u8 sta_idx; + u8 sta_idx, vif_idx; hdr = (struct ieee80211_hdr *) skb->data; fc = hdr->frame_control; - avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv; + if (tx_info->control.vif && + (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) + vif_idx = ((struct ath9k_htc_vif *) + tx_info->control.vif->drv_priv)->index; + else + vif_idx = priv->nvifs; + if (sta) { ista = (struct ath9k_htc_sta *) sta->drv_priv; sta_idx = ista->index; @@ -106,7 +111,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); tx_hdr.node_idx = sta_idx; - tx_hdr.vif_idx = avp->index; + tx_hdr.vif_idx = vif_idx; if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { tx_ctl.type = ATH9K_HTC_AMPDU; @@ -169,7 +174,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) tx_ctl.type = ATH9K_HTC_NORMAL; mgmt_hdr.node_idx = sta_idx; - mgmt_hdr.vif_idx = avp->index; + mgmt_hdr.vif_idx = vif_idx; mgmt_hdr.tidno = 0; mgmt_hdr.flags = 0; diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 633e3d949ec..d01c4adab8d 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -899,6 +899,7 @@ #define AR_DEVID_7010(_ah) \ (((_ah)->hw_version.devid == 0x7010) || \ + ((_ah)->hw_version.devid == 0x7015) || \ ((_ah)->hw_version.devid == 0x9018)) #define AR_RADIO_SREV_MAJOR 0xf0 diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 
a1c39526161..345dd9721b4 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -31,7 +31,6 @@ enum ctl_group { #define NO_CTL 0xff #define SD_NO_CTL 0xE0 #define NO_CTL 0xff -#define CTL_MODE_M 7 #define CTL_11A 0 #define CTL_11B 1 #define CTL_11G 2 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 16bbfa3189a..996e9d7d758 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2723,14 +2723,6 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) packet = &priv->rx_buffers[i]; - /* Sync the DMA for the STATUS buffer so CPU is sure to get - * the correct values */ - pci_dma_sync_single_for_cpu(priv->pci_dev, - sq->nic + - sizeof(struct ipw2100_status) * i, - sizeof(struct ipw2100_status), - PCI_DMA_FROMDEVICE); - /* Sync the DMA for the RX buffer so CPU is sure to get * the correct values */ pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, @@ -6665,12 +6657,13 @@ static int __init ipw2100_init(void) printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); + pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + ret = pci_register_driver(&ipw2100_pci_driver); if (ret) goto out; - pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); #ifdef CONFIG_IPW2100_DEBUG ipw2100_debug_level = debug; ret = driver_create_file(&ipw2100_pci_driver.driver, diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index fec02621232..0b779a41a14 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 128, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 128, .ucode_tracing = true, .sensitivity_calib_by_driver = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 6950a783913..8ccfcd08218 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { .led_compensation = 64, .broken_powersave = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 512, .tx_power_by_driver = true, }; @@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = { .led_compensation = 64, .broken_powersave = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 512, .tx_power_by_driver = true, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index d6da356608f..d92b7290923 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = { .led_compensation = 61, .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .temperature_kelvin = true, .max_event_log_size = 512, .tx_power_by_driver = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index aacf3770f07..48bdcd8d2e9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = { .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af4fd50f340..cee06b968de 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 512, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, .max_event_log_size = 512, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, @@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { 
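The long run of iwlwifi hunks in this area is a mechanical switch from the single IWL_MONITORING_PERIOD to one of two stuck-TX-queue watchdog intervals, IWL_DEF_MONITORING_PERIOD (1000 ms) and IWL_LONG_MONITORING_PERIOD (5000 ms); the constants themselves are introduced in the iwl-dev.h hunk further down. A compact sketch of how such a per-device period might be consumed; only the two constants and the field name come from the diff, the rest is illustrative.

    #include <stdio.h>

    #define IWL_DEF_MONITORING_PERIOD  1000  /* ms, most devices */
    #define IWL_LONG_MONITORING_PERIOD 5000  /* ms, devices that want a laxer check */

    struct cfg {
        const char *name;
        unsigned int monitor_recover_period;  /* 0 disables the check in this sketch */
    };

    /* Decide when the next stuck-queue check should run. */
    static unsigned int next_check_ms(const struct cfg *c, unsigned int now_ms)
    {
        if (!c->monitor_recover_period)
            return 0;
        return now_ms + c->monitor_recover_period;
    }

    int main(void)
    {
        struct cfg agn_5300 = { "5300agn", IWL_LONG_MONITORING_PERIOD };

        printf("%s: next TX-queue check at t=%u ms\n",
               agn_5300.name, next_check_ms(&agn_5300, 0));
        return 0;
    }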
.support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1500, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1500, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1500, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, @@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .chain_noise_scale = 1000, - .monitor_recover_period = IWL_MONITORING_PERIOD, + .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, .max_event_log_size = 1024, .ucode_tracing = true, .sensitivity_calib_by_driver = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c1882fd8345..10d7b9b7f06 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3667,6 +3667,49 @@ out_exit: IWL_DEBUG_MAC80211(priv, "leave\n"); } +static void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + priv->staging_rxon.filter_flags &= ~filter_nand; + priv->staging_rxon.filter_flags |= 
filter_or; + + iwlcore_commit_rxon(priv); + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) { struct iwl_priv *priv = hw->priv; @@ -3867,7 +3910,7 @@ static struct ieee80211_ops iwl_hw_ops = { .add_interface = iwl_mac_add_interface, .remove_interface = iwl_mac_remove_interface, .config = iwl_mac_config, - .configure_filter = iwl_configure_filter, + .configure_filter = iwlagn_configure_filter, .set_key = iwl_mac_set_key, .update_tkip_key = iwl_mac_update_tkip_key, .conf_tx = iwl_mac_conf_tx, diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 2c03c6e20a7..07dbc279644 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1328,51 +1328,6 @@ out: EXPORT_SYMBOL(iwl_apm_init); - -void iwl_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->mutex); - - priv->staging_rxon.filter_flags &= ~filter_nand; - priv->staging_rxon.filter_flags |= filter_or; - - iwlcore_commit_rxon(priv); - - mutex_unlock(&priv->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. 
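Both the per-driver configure_filter callbacks added above and the shared iwl_configure_filter() being removed from iwl-core.c here use the same CHK macro trick: for each mac80211 filter flag, accumulate the matching RXON bit into either a set mask or a clear mask, then apply both masks in one place under the mutex. A self-contained sketch of that accumulation pattern; the flag values below are made up for the example, not the real FIF_ or RXON_ constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag values only. */
    #define FIF_PROMISC   0x1
    #define FIF_CONTROL   0x2
    #define RXON_PROMISC  0x10
    #define RXON_CTL2HOST 0x20

    int main(void)
    {
        unsigned int total_flags = FIF_CONTROL;   /* what mac80211 asked for */
        uint32_t filter_or = 0, filter_nand = 0;
        uint32_t filter_flags = RXON_PROMISC;     /* current device state */

    #define CHK(test, flag) do {            \
            if (total_flags & (test))       \
                filter_or |= (flag);        \
            else                            \
                filter_nand |= (flag);      \
        } while (0)

        CHK(FIF_PROMISC, RXON_PROMISC);
        CHK(FIF_CONTROL, RXON_CTL2HOST);
    #undef CHK

        filter_flags &= ~filter_nand;   /* clear bits for flags not requested */
        filter_flags |= filter_or;      /* set bits for flags requested */

        printf("filter_flags = 0x%x\n", filter_flags);  /* prints 0x20 */
        return 0;
    }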
- */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} -EXPORT_SYMBOL(iwl_configure_filter); - int iwl_set_hw_params(struct iwl_priv *priv) { priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4a71dfb10a1..5e6ee3da6bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -372,9 +372,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, u32 decrypt_res, struct ieee80211_rx_status *stats); void iwl_irq_handle_error(struct iwl_priv *priv); -void iwl_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); int iwl_set_hw_params(struct iwl_priv *priv); void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); void iwl_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index f35bcad56e3..2e97cd2fa98 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -1049,7 +1049,8 @@ struct iwl_event_log { #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) /* timer constants use to monitor and recover stuck tx queues in mSecs */ -#define IWL_MONITORING_PERIOD (1000) +#define IWL_DEF_MONITORING_PERIOD (1000) +#define IWL_LONG_MONITORING_PERIOD (5000) #define IWL_ONE_HUNDRED_MSECS (100) #define IWL_SIXTY_SECS (60000) diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 70c4b8fba0e..59a308b02f9 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3391,6 +3391,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, return 0; } + +static void iwl3945_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + priv->staging_rxon.filter_flags &= ~filter_nand; + priv->staging_rxon.filter_flags |= filter_or; + + /* + * Committing directly here breaks for some reason, + * but we'll eventually commit the filter flags + * change anyway. + */ + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + + /***************************************************************************** * * sysfs attributes @@ -3796,7 +3845,7 @@ static struct ieee80211_ops iwl3945_hw_ops = { .add_interface = iwl_mac_add_interface, .remove_interface = iwl_mac_remove_interface, .config = iwl_mac_config, - .configure_filter = iwl_configure_filter, + .configure_filter = iwl3945_configure_filter, .set_key = iwl3945_mac_set_key, .conf_tx = iwl_mac_conf_tx, .reset_tsf = iwl_mac_reset_tsf, diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index ba854c70ab9..87b634978b3 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -128,7 +128,7 @@ struct if_sdio_card { bool helper_allocated; bool firmware_allocated; - u8 buffer[65536]; + u8 buffer[65536] __attribute__((aligned(4))); spinlock_t lock; struct if_sdio_packet *packets; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 01ad7f77383..86fa8abdd66 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct ieee80211_rx_status rx_status; if (data->idle) { - wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n"); + wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); return false; } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index d761ed2d8af..f152a25be59 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); if (rxq->rxd == NULL) { - wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n"); + wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n"); return -ENOMEM; } memset(rxq->rxd, 0, size); rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { - wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n"); + wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); return -ENOMEM; } @@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); if (txq->txd == NULL) { - wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n"); + wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n"); return -ENOMEM; } memset(txq->txd, 0, size); txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { - wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n"); + wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); return -ENOMEM; } @@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) PCI_DMA_BIDIRECTIONAL); if (!timeout) { - wiphy_err(hw->wiphy, "command %s timeout after %u ms\n", + wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), MWL8K_CMD_TIMEOUT_MS); rc = -ETIMEDOUT; @@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) rc = cmd->result ? 
-EINVAL : 0; if (rc) - wiphy_err(hw->wiphy, "command %s error 0x%x\n", + wiphy_err(hw->wiphy, "Command %s error 0x%x\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), le16_to_cpu(cmd->result)); else if (ms > 2000) - wiphy_notice(hw->wiphy, "command %s took %d ms\n", + wiphy_notice(hw->wiphy, "Command %s took %d ms\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), ms); @@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw) rc = request_irq(priv->pdev->irq, mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { - wiphy_err(hw->wiphy, "failed to register irq handler\n"); + wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); return -EIO; } @@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, priv->sram = pci_iomap(pdev, 0, 0x10000); if (priv->sram == NULL) { - wiphy_err(hw->wiphy, "cannot map device sram\n"); + wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); goto err_iounmap; } @@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, if (priv->regs == NULL) { priv->regs = pci_iomap(pdev, 2, 0x10000); if (priv->regs == NULL) { - wiphy_err(hw->wiphy, "cannot map device registers\n"); + wiphy_err(hw->wiphy, "Cannot map device registers\n"); goto err_iounmap; } } @@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, /* Ask userland hotplug daemon for the device firmware */ rc = mwl8k_request_firmware(priv); if (rc) { - wiphy_err(hw->wiphy, "firmware files not found\n"); + wiphy_err(hw->wiphy, "Firmware files not found\n"); goto err_stop_firmware; } /* Load firmware into hardware */ rc = mwl8k_load_firmware(hw); if (rc) { - wiphy_err(hw->wiphy, "cannot start firmware\n"); + wiphy_err(hw->wiphy, "Cannot start firmware\n"); goto err_stop_firmware; } @@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = request_irq(priv->pdev->irq, mwl8k_interrupt, IRQF_SHARED, MWL8K_NAME, hw); if (rc) { - wiphy_err(hw->wiphy, "failed to register irq handler\n"); + wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); goto err_free_queues; } @@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = mwl8k_cmd_get_hw_spec_sta(hw); } if (rc) { - wiphy_err(hw->wiphy, "cannot initialise firmware\n"); + wiphy_err(hw->wiphy, "Cannot initialise firmware\n"); goto err_free_irq; } @@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, /* Turn radio off */ rc = mwl8k_cmd_radio_disable(hw); if (rc) { - wiphy_err(hw->wiphy, "cannot disable\n"); + wiphy_err(hw->wiphy, "Cannot disable\n"); goto err_free_irq; } /* Clear MAC address */ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); if (rc) { - wiphy_err(hw->wiphy, "cannot clear mac address\n"); + wiphy_err(hw->wiphy, "Cannot clear MAC address\n"); goto err_free_irq; } @@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, rc = ieee80211_register_hw(hw); if (rc) { - wiphy_err(hw->wiphy, "cannot register device\n"); + wiphy_err(hw->wiphy, "Cannot register device\n"); goto err_free_queues; } diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index d687cb7f2a5..78347041ec4 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev, } if (j == 0) { - wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n", + wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n", (band == IEEE80211_BAND_2GHZ) ? 
2 : 5); ret = -ENODATA; @@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) u8 perm_addr[ETH_ALEN]; wiphy_warn(dev->wiphy, - "invalid hwaddr! using randomly generated mac addr\n"); + "Invalid hwaddr! Using randomly generated MAC addr\n"); random_ether_addr(perm_addr); SET_IEEE80211_PERM_ADDR(dev, perm_addr); } - wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n", + wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", dev->wiphy->perm_addr, priv->version, p54_rf_chips[priv->rxhw]); diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index 47006bca485..15b20c29a60 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) if (fw_version) wiphy_info(priv->hw->wiphy, - "fw rev %s - softmac protocol %x.%x\n", + "FW rev %s - Softmac protocol %x.%x\n", fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); if (priv->fw_var < 0x500) diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c index ea91f5cce6b..3837e1eec5f 100644 --- a/drivers/net/wireless/p54/led.c +++ b/drivers/net/wireless/p54/led.c @@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work) err = p54_set_leds(priv); if (err && net_ratelimit()) wiphy_err(priv->hw->wiphy, - "failed to update leds (%d).\n", err); + "failed to update LEDs (%d).\n", err); if (rerun) ieee80211_queue_delayed_work(priv->hw, &priv->led_work, @@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv, err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); if (err) wiphy_err(priv->hw->wiphy, - "failed to register %s led.\n", name); + "Failed to register %s LED.\n", name); else led->registered = 1; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 822f8dc26e9..1eacba4daa5 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev) P54P_READ(dev_int); if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { - wiphy_err(dev->wiphy, "cannot boot firmware!\n"); + wiphy_err(dev->wiphy, "Cannot boot firmware!\n"); p54p_stop(dev); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 427b46f558e..0e937dc0c9c 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && - (!payload->status)) + !(payload->status & P54_TX_FAILED)) info->flags |= IEEE80211_TX_STAT_ACK; if (payload->status & P54_TX_PSM_CANCELLED) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; @@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) case P54_TRAP_BEACON_TX: break; case P54_TRAP_RADAR: - wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq); + wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq); break; case P54_TRAP_NO_BEACON: if (priv->vif) diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index b50c39aaec0..30107ce78df 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) &priv->rx_ring_dma); if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) 
{ - wiphy_err(dev->wiphy, "cannot allocate rx ring\n"); + wiphy_err(dev->wiphy, "Cannot allocate RX ring\n"); return -ENOMEM; } @@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev, ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); if (!ring || (unsigned long)ring & 0xFF) { - wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n", + wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } @@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev) ret = request_irq(priv->pdev->irq, rtl8180_interrupt, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) { - wiphy_err(dev->wiphy, "failed to register irq handler\n"); + wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); goto err_free_rings; } diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 5738a55c1b0..98e0351c1dd 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev) } while (--i); if (!i) { - wiphy_err(dev->wiphy, "reset timeout!\n"); + wiphy_err(dev->wiphy, "Reset timeout!\n"); return -ETIMEDOUT; } @@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); - wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n", + wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", mac_addr, chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index fd96f911232..97eebdcf7eb 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev) rtl8225_write(dev, 0x02, 0x044d); msleep(100); if (!(rtl8225_read(dev, 6) & (1 << 7))) - wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", + wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n", rtl8225_read(dev, 6)); } @@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev) rtl8225_write(dev, 0x02, 0x044D); msleep(100); if (!(rtl8225_read(dev, 6) & (1 << 7))) - wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", + wiphy_warn(dev->wiphy, "RF Calibration Failed! 
%x\n", rtl8225_read(dev, 6)); } diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c index a37b30cef48..ce3722f4c3e 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c @@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout) cmd->timeout = timeout; - ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); + ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd)); if (ret < 0) { wl1251_error("cmd trigger scan to failed: %d", ret); goto out; diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index b0de5794718..c3ceebb5be8 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -236,7 +236,7 @@ static inline u64 dma_pte_addr(struct dma_pte *pte) return pte->val & VTD_PAGE_MASK; #else /* Must have a full atomic 64-bit read */ - return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK; + return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; #endif } diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 1694a0e2845..fd1d2867cdc 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -21,6 +21,8 @@ static int ir_ioapic_num, ir_hpet_num; int intr_remapping_enabled; static int disable_intremap; +static int disable_sourceid_checking; + static __init int setup_nointremap(char *str) { disable_intremap = 1; @@ -28,6 +30,22 @@ static __init int setup_nointremap(char *str) } early_param("nointremap", setup_nointremap); +static __init int setup_intremap(char *str) +{ + if (!str) + return -EINVAL; + + if (!strncmp(str, "on", 2)) + disable_intremap = 0; + else if (!strncmp(str, "off", 3)) + disable_intremap = 1; + else if (!strncmp(str, "nosid", 5)) + disable_sourceid_checking = 1; + + return 0; +} +early_param("intremap", setup_intremap); + struct irq_2_iommu { struct intel_iommu *iommu; u16 irte_index; @@ -453,6 +471,8 @@ int free_irte(int irq) static void set_irte_sid(struct irte *irte, unsigned int svt, unsigned int sq, unsigned int sid) { + if (disable_sourceid_checking) + svt = SVT_NO_VERIFY; irte->svt = svt; irte->sq = sq; irte->sid = sid; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 79baa6368f7..cff7cc2c1f0 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -219,6 +219,13 @@ config SONYPI_COMPAT ---help--- Build the sonypi driver compatibility code into the sony-laptop driver. +config IDEAPAD_ACPI + tristate "Lenovo IdeaPad ACPI Laptop Extras" + depends on ACPI + depends on RFKILL + help + This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. + config THINKPAD_ACPI tristate "ThinkPad ACPI Laptop Extras" depends on ACPI @@ -479,10 +486,12 @@ config TOPSTAR_LAPTOP config ACPI_TOSHIBA tristate "Toshiba Laptop Extras" depends on ACPI + depends on LEDS_CLASS + depends on NEW_LEDS + depends on BACKLIGHT_CLASS_DEVICE depends on INPUT depends on RFKILL || RFKILL = n select INPUT_POLLDEV - select BACKLIGHT_CLASS_DEVICE ---help--- This driver adds support for access to certain system settings on "legacy free" Toshiba laptops. 
These laptops can be recognized by diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 4744c7744ff..85fb2b84f57 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_ACERHDF) += acerhdf.o obj-$(CONFIG_HP_WMI) += hp-wmi.o obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o +obj-$(CONFIG_IDEAPAD_ACPI) += ideapad_acpi.o obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index e058c2ba2a1..ca05aefd03b 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -938,10 +938,11 @@ static int set_brightness(int value) /* SPLV laptop */ if (hotk->methods->brightness_set) { if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set, - value, NULL)) + value, NULL)) { printk(KERN_WARNING "Asus ACPI: Error changing brightness\n"); ret = -EIO; + } goto out; } @@ -953,10 +954,11 @@ static int set_brightness(int value) hotk->methods->brightness_down, NULL, NULL); (value > 0) ? value-- : value++; - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { printk(KERN_WARNING "Asus ACPI: Error changing brightness\n"); ret = -EIO; + } } out: return ret; diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index d071ce05632..097083cac41 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -841,6 +841,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = { .callback = dmi_check_cb }, { + .ident = "Dell Mini 1012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + .callback = dmi_check_cb + }, + { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -1092,5 +1100,6 @@ MODULE_ALIAS("dmi:*:rnJHL90:rvrREFERENCE:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*"); +MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*"); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index b41ed5cab3e..4413975912e 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -122,6 +122,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = { }, }, { + .ident = "Dell Mini 1012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + }, + { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index f1551637498..c1741142a4c 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -79,12 +79,13 @@ struct bios_args { u32 command; u32 commandtype; u32 datasize; - char *data; + u32 data; }; struct bios_return { u32 sigpass; u32 return_code; + u32 value; }; struct key_entry { @@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = { * buffer = kzalloc(128, GFP_KERNEL); * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) */ -static int hp_wmi_perform_query(int query, int write, char *buffer, +static int hp_wmi_perform_query(int query, int write, u32 *buffer, int 
buffersize) { struct bios_return bios_return; @@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer, .command = write ? 0x2 : 0x1, .commandtype = query, .datasize = buffersize, - .data = buffer, + .data = *buffer, }; struct acpi_buffer input = { sizeof(struct bios_args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer, bios_return = *((struct bios_return *)obj->buffer.pointer); - if (bios_return.return_code) { - printk(KERN_WARNING PREFIX "Query %d returned %d\n", query, - bios_return.return_code); - kfree(obj); - return bios_return.return_code; - } - if (obj->buffer.length - sizeof(bios_return) > buffersize) { - kfree(obj); - return -EINVAL; - } - - memset(buffer, 0, buffersize); - memcpy(buffer, - ((char *)obj->buffer.pointer) + sizeof(struct bios_return), - obj->buffer.length - sizeof(bios_return)); - kfree(obj); + memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); return 0; } static int hp_wmi_display_state(void) { - int state; - int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state, + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, sizeof(state)); if (ret) return -EINVAL; @@ -208,8 +194,8 @@ static int hp_wmi_display_state(void) static int hp_wmi_hddtemp_state(void) { - int state; - int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state, + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, sizeof(state)); if (ret) return -EINVAL; @@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void) static int hp_wmi_als_state(void) { - int state; - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state, + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, sizeof(state)); if (ret) return -EINVAL; @@ -228,8 +214,8 @@ static int hp_wmi_als_state(void) static int hp_wmi_dock_state(void) { - int state; - int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, sizeof(state)); if (ret) @@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void) static int hp_wmi_tablet_state(void) { - int state; - int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, sizeof(state)); if (ret) return ret; @@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked) int ret; ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, - (char *)&query, sizeof(query)); + &query, sizeof(query)); if (ret) return -EINVAL; return 0; @@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = { static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) { - int wireless; + int wireless = 0; int mask; hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, - (char *)&wireless, sizeof(wireless)); + &wireless, sizeof(wireless)); /* TBD: Pass error */ mask = 0x200 << (r * 8); @@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) { - int wireless; + int wireless = 0; int mask; hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, - (char *)&wireless, sizeof(wireless)); + &wireless, sizeof(wireless)); /* TBD: Pass error */ mask = 0x800 << (r * 8); @@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 tmp = simple_strtoul(buf, 
NULL, 10); - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp, + int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, sizeof(tmp)); if (ret) return -EINVAL; @@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context) static struct key_entry *key; union acpi_object *obj; u32 event_id, event_data; - int key_code, ret; + int key_code = 0, ret; u32 *location; acpi_status status; @@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_BEZEL_BUTTON: ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, - (char *)&key_code, + &key_code, sizeof(key_code)); if (ret) break; @@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device) static int __devinit hp_wmi_bios_setup(struct platform_device *device) { int err; - int wireless; + int wireless = 0; - err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless, + err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, sizeof(wireless)); if (err) return err; diff --git a/drivers/platform/x86/ideapad_acpi.c b/drivers/platform/x86/ideapad_acpi.c new file mode 100644 index 00000000000..798496353e8 --- /dev/null +++ b/drivers/platform/x86/ideapad_acpi.c @@ -0,0 +1,306 @@ +/* + * ideapad_acpi.c - Lenovo IdeaPad ACPI Extras + * + * Copyright © 2010 Intel Corporation + * Copyright © 2010 David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> +#include <linux/rfkill.h> + +#define IDEAPAD_DEV_CAMERA 0 +#define IDEAPAD_DEV_WLAN 1 +#define IDEAPAD_DEV_BLUETOOTH 2 +#define IDEAPAD_DEV_3G 3 +#define IDEAPAD_DEV_KILLSW 4 + +struct ideapad_private { + struct rfkill *rfk[5]; +}; + +static struct { + char *name; + int type; +} ideapad_rfk_data[] = { + /* camera has no rfkill */ + { "ideapad_wlan", RFKILL_TYPE_WLAN }, + { "ideapad_bluetooth", RFKILL_TYPE_BLUETOOTH }, + { "ideapad_3g", RFKILL_TYPE_WWAN }, + { "ideapad_killsw", RFKILL_TYPE_WLAN } +}; + +static int ideapad_dev_exists(int device) +{ + acpi_status status; + union acpi_object in_param; + struct acpi_object_list input = { 1, &in_param }; + struct acpi_buffer output; + union acpi_object out_obj; + + output.length = sizeof(out_obj); + output.pointer = &out_obj; + + in_param.type = ACPI_TYPE_INTEGER; + in_param.integer.value = device + 1; + + status = acpi_evaluate_object(NULL, "\\_SB_.DECN", &input, &output); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method failed %d. 
Is this an IdeaPAD?\n", status); + return -ENODEV; + } + if (out_obj.type != ACPI_TYPE_INTEGER) { + printk(KERN_WARNING "IdeaPAD \\_SB_.DECN method returned unexpected type\n"); + return -ENODEV; + } + return out_obj.integer.value; +} + +static int ideapad_dev_get_state(int device) +{ + acpi_status status; + union acpi_object in_param; + struct acpi_object_list input = { 1, &in_param }; + struct acpi_buffer output; + union acpi_object out_obj; + + output.length = sizeof(out_obj); + output.pointer = &out_obj; + + in_param.type = ACPI_TYPE_INTEGER; + in_param.integer.value = device + 1; + + status = acpi_evaluate_object(NULL, "\\_SB_.GECN", &input, &output); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method failed %d\n", status); + return -ENODEV; + } + if (out_obj.type != ACPI_TYPE_INTEGER) { + printk(KERN_WARNING "IdeaPAD \\_SB_.GECN method returned unexpected type\n"); + return -ENODEV; + } + return out_obj.integer.value; +} + +static int ideapad_dev_set_state(int device, int state) +{ + acpi_status status; + union acpi_object in_params[2]; + struct acpi_object_list input = { 2, in_params }; + + in_params[0].type = ACPI_TYPE_INTEGER; + in_params[0].integer.value = device + 1; + in_params[1].type = ACPI_TYPE_INTEGER; + in_params[1].integer.value = state; + + status = acpi_evaluate_object(NULL, "\\_SB_.SECN", &input, NULL); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING "IdeaPAD \\_SB_.SECN method failed %d\n", status); + return -ENODEV; + } + return 0; +} +static ssize_t show_ideapad_cam(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int state = ideapad_dev_get_state(IDEAPAD_DEV_CAMERA); + if (state < 0) + return state; + + return sprintf(buf, "%d\n", state); +} + +static ssize_t store_ideapad_cam(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret, state; + + if (!count) + return 0; + if (sscanf(buf, "%i", &state) != 1) + return -EINVAL; + ret = ideapad_dev_set_state(IDEAPAD_DEV_CAMERA, !!state); + if (ret < 0) + return ret; + return count; +} + +static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); + +static int ideapad_rfk_set(void *data, bool blocked) +{ + int device = (unsigned long)data; + + if (device == IDEAPAD_DEV_KILLSW) + return -EINVAL; + return ideapad_dev_set_state(device, !blocked); +} + +static struct rfkill_ops ideapad_rfk_ops = { + .set_block = ideapad_rfk_set, +}; + +static void ideapad_sync_rfk_state(struct acpi_device *adevice) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + int hw_blocked = !ideapad_dev_get_state(IDEAPAD_DEV_KILLSW); + int i; + + rfkill_set_hw_state(priv->rfk[IDEAPAD_DEV_KILLSW], hw_blocked); + for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++) + if (priv->rfk[i]) + rfkill_set_hw_state(priv->rfk[i], hw_blocked); + if (hw_blocked) + return; + + for (i = IDEAPAD_DEV_WLAN; i < IDEAPAD_DEV_KILLSW; i++) + if (priv->rfk[i]) + rfkill_set_sw_state(priv->rfk[i], !ideapad_dev_get_state(i)); +} + +static int ideapad_register_rfkill(struct acpi_device *adevice, int dev) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + int ret; + + priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev-1].name, &adevice->dev, + ideapad_rfk_data[dev-1].type, &ideapad_rfk_ops, + (void *)(long)dev); + if (!priv->rfk[dev]) + return -ENOMEM; + + ret = rfkill_register(priv->rfk[dev]); + if (ret) { + rfkill_destroy(priv->rfk[dev]); + return ret; + } + return 0; +} + +static void ideapad_unregister_rfkill(struct 
acpi_device *adevice, int dev) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + + if (!priv->rfk[dev]) + return; + + rfkill_unregister(priv->rfk[dev]); + rfkill_destroy(priv->rfk[dev]); +} + +static const struct acpi_device_id ideapad_device_ids[] = { + { "VPC2004", 0}, + { "", 0}, +}; +MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); + +static int ideapad_acpi_add(struct acpi_device *adevice) +{ + int i; + int devs_present[5]; + struct ideapad_private *priv; + + for (i = IDEAPAD_DEV_CAMERA; i < IDEAPAD_DEV_KILLSW; i++) { + devs_present[i] = ideapad_dev_exists(i); + if (devs_present[i] < 0) + return devs_present[i]; + } + + /* The hardware switch is always present */ + devs_present[IDEAPAD_DEV_KILLSW] = 1; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (devs_present[IDEAPAD_DEV_CAMERA]) { + int ret = device_create_file(&adevice->dev, &dev_attr_camera_power); + if (ret) { + kfree(priv); + return ret; + } + } + + dev_set_drvdata(&adevice->dev, priv); + for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) { + if (!devs_present[i]) + continue; + + ideapad_register_rfkill(adevice, i); + } + ideapad_sync_rfk_state(adevice); + return 0; +} + +static int ideapad_acpi_remove(struct acpi_device *adevice, int type) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + int i; + + device_remove_file(&adevice->dev, &dev_attr_camera_power); + + for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) + ideapad_unregister_rfkill(adevice, i); + + dev_set_drvdata(&adevice->dev, NULL); + kfree(priv); + return 0; +} + +static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) +{ + ideapad_sync_rfk_state(adevice); +} + +static struct acpi_driver ideapad_acpi_driver = { + .name = "ideapad_acpi", + .class = "IdeaPad", + .ids = ideapad_device_ids, + .ops.add = ideapad_acpi_add, + .ops.remove = ideapad_acpi_remove, + .ops.notify = ideapad_acpi_notify, + .owner = THIS_MODULE, +}; + + +static int __init ideapad_acpi_module_init(void) +{ + acpi_bus_register_driver(&ideapad_acpi_driver); + + return 0; +} + + +static void __exit ideapad_acpi_module_exit(void) +{ + acpi_bus_unregister_driver(&ideapad_acpi_driver); + +} + +MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); +MODULE_DESCRIPTION("IdeaPad ACPI Extras"); +MODULE_LICENSE("GPL"); + +module_init(ideapad_acpi_module_init); +module_exit(ideapad_acpi_module_exit); diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index afe82e50dfe..9024480a822 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -1342,8 +1342,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) limits = &ips_lv_limits; else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) limits = &ips_ulv_limits; - else + else { dev_info(&ips->dev->dev, "No CPUID match found.\n"); + goto out; + } rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); tdp = turbo_power & TURBO_TDP_MASK; @@ -1432,6 +1434,12 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) spin_lock_init(&ips->turbo_status_lock); + ret = pci_enable_device(dev); + if (ret) { + dev_err(&dev->dev, "can't enable PCI device, aborting\n"); + goto error_free; + } + if (!pci_resource_start(dev, 0)) { dev_err(&dev->dev, "TBAR not assigned, aborting\n"); ret = -ENXIO; @@ -1444,11 +1452,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) goto error_free; } - ret = pci_enable_device(dev); - if (ret) { - dev_err(&dev->dev, "can't 
enable PCI device, aborting\n"); - goto error_free; - } ips->regmap = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index 73f8e6d7266..2b11a33325e 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c @@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar) */ static struct rar_device *_rar_to_device(int rar, int *off) { - if (rar >= 0 && rar <= 3) { + if (rar >= 0 && rar < MRST_NUM_RAR) { *off = rar; return &my_rar_device; } diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 943f9084dcb..6abe18e638e 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) mdelay(1); *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); } else if (cmd == IPC_I2C_WRITE) { - writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR); + writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR); mdelay(1); writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); } else { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 5d6119bed00..e35ed128bde 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, TP_ACPI_HOTKEYSCAN_MUTE, TP_ACPI_HOTKEYSCAN_THINKPAD, + TP_ACPI_HOTKEYSCAN_UNK1, + TP_ACPI_HOTKEYSCAN_UNK2, + TP_ACPI_HOTKEYSCAN_UNK3, + TP_ACPI_HOTKEYSCAN_UNK4, + TP_ACPI_HOTKEYSCAN_UNK5, + TP_ACPI_HOTKEYSCAN_UNK6, + TP_ACPI_HOTKEYSCAN_UNK7, + TP_ACPI_HOTKEYSCAN_UNK8, + + /* Hotkey keymap size */ + TPACPI_HOTKEY_MAP_LEN }; enum { /* Keys/events available through NVRAM polling */ @@ -3082,6 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ }; +typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; + static int __init hotkey_init(struct ibm_init_struct *iibm) { /* Requirements for changing the default keymaps: @@ -3113,9 +3126,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) * If the above is too much to ask, don't change the keymap. * Ask the thinkpad-acpi maintainer to do it, instead. 
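The hotkey_init() changes that follow replace the separate IBM and Lenovo keycode arrays with an array of tpacpi_keymap_t maps plus a vendor-keyed quirk table, so the keymap is chosen by tpacpi_check_quirks() instead of an if/else on tpacpi_is_lenovo(). A reduced userspace sketch of that table-driven selection; two-entry maps, placeholder keycodes and a string vendor match stand in for the real structures.

    #include <stdio.h>
    #include <string.h>

    typedef unsigned short keymap_t[2];  /* real maps have TPACPI_HOTKEY_MAP_LEN entries */

    enum { KEYMAP_IBM_GENERIC, KEYMAP_LENOVO_GENERIC };

    static const keymap_t keymaps[] = {
        [KEYMAP_IBM_GENERIC]    = { 1, 2 },  /* placeholder keycodes */
        [KEYMAP_LENOVO_GENERIC] = { 1, 3 },
    };

    struct quirk { const char *vendor; unsigned long keymap_id; };

    static const struct quirk qtable[] = {
        { "IBM",    KEYMAP_IBM_GENERIC },
        { "LENOVO", KEYMAP_LENOVO_GENERIC },
    };

    /* Simplified analogue of tpacpi_check_quirks(): first match wins. */
    static unsigned long check_quirks(const char *vendor)
    {
        for (size_t i = 0; i < sizeof(qtable) / sizeof(qtable[0]); i++)
            if (!strcmp(qtable[i].vendor, vendor))
                return qtable[i].keymap_id;
        return KEYMAP_IBM_GENERIC;  /* fallback */
    }

    int main(void)
    {
        unsigned short keycode_map[2];
        unsigned long id = check_quirks("LENOVO");

        memcpy(keycode_map, &keymaps[id], sizeof(keymap_t));
        printf("keymap %lu, scancode 1 -> keycode %u\n", id, keycode_map[1]);
        return 0;
    }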
*/ - static u16 ibm_keycode_map[] __initdata = { + + enum keymap_index { + TPACPI_KEYMAP_IBM_GENERIC = 0, + TPACPI_KEYMAP_LENOVO_GENERIC, + }; + + static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = { + /* Generic keymap for IBM ThinkPads */ + [TPACPI_KEYMAP_IBM_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ - KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP, + KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP, KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, @@ -3146,11 +3167,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, - }; - static u16 lenovo_keycode_map[] __initdata = { + }, + + /* Generic keymap for Lenovo ThinkPads */ + [TPACPI_KEYMAP_LENOVO_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, - KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, + KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ @@ -3189,11 +3212,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + }, + }; + + static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = { + /* Generic maps (fallback) */ + { + .vendor = PCI_VENDOR_ID_IBM, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_IBM_GENERIC, + }, + { + .vendor = PCI_VENDOR_ID_LENOVO, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_LENOVO_GENERIC, + }, }; -#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0]) +#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) +#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) int res, i; int status; @@ -3202,6 +3239,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) bool tabletsw_state = false; unsigned long quirks; + unsigned long keymap_id; vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, "initializing hotkey subdriver\n"); @@ -3342,7 +3380,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; /* Set up key map */ - hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL); if (!hotkey_keycode_map) { @@ -3352,17 +3389,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; } - if (tpacpi_is_lenovo()) { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using Lenovo default hot key map\n"); - memcpy(hotkey_keycode_map, &lenovo_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } else { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using IBM default hot key map\n"); - memcpy(hotkey_keycode_map, &ibm_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } + keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable, + ARRAY_SIZE(tpacpi_keymap_qtable)); + BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps)); + dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, + "using keymap number %lu\n", keymap_id); + + memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id], + TPACPI_HOTKEY_MAP_SIZE); input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; @@ -3469,7 
+3503,8 @@ static bool hotkey_notify_hotkey(const u32 hkey, *send_acpi_ev = true; *ignore_acpi_ev = false; - if (scancode > 0 && scancode < 0x21) { + /* HKEY event 0x1001 is scancode 0x00 */ + if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) { scancode--; if (!(hotkey_source_mask & (1 << scancode))) { tpacpi_input_send_key_masked(scancode); @@ -6080,13 +6115,18 @@ static struct backlight_ops ibm_backlight_data = { /* --------------------------------------------------------------------- */ +/* + * Call _BCL method of video device. On some ThinkPads this will + * switch the firmware to the ACPI brightness control mode. + */ + static int __init tpacpi_query_bcl_levels(acpi_handle handle) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; int rc; - if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) { + if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) { obj = (union acpi_object *)buffer.pointer; if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { printk(TPACPI_ERR "Unknown _BCL data, " @@ -6103,55 +6143,22 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) return rc; } -static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle, - u32 lvl, void *context, void **rv) -{ - char name[ACPI_PATH_SEGMENT_LENGTH]; - struct acpi_buffer buffer = { sizeof(name), &name }; - - if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) && - !strncmp("_BCL", name, sizeof(name) - 1)) { - BUG_ON(!rv || !*rv); - **(int **)rv = tpacpi_query_bcl_levels(handle); - return AE_CTRL_TERMINATE; - } else { - return AE_OK; - } -} /* * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map */ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void) { - int status; + acpi_handle video_device; int bcl_levels = 0; - void *bcl_ptr = &bcl_levels; - - if (!vid_handle) - TPACPI_ACPIHANDLE_INIT(vid); - - if (!vid_handle) - return 0; - - /* - * Search for a _BCL method, and execute it. This is safe on all - * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista - * BIOS in ACPI backlight control mode. We do NOT have to care - * about calling the _BCL method in an enabled video device, any - * will do for our purposes. - */ - status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3, - tpacpi_acpi_walk_find_bcl, NULL, NULL, - &bcl_ptr); + tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device); + if (video_device) + bcl_levels = tpacpi_query_bcl_levels(video_device); - if (ACPI_SUCCESS(status) && bcl_levels > 2) { - tp_features.bright_acpimode = 1; - return bcl_levels - 2; - } + tp_features.bright_acpimode = (bcl_levels > 0); - return 0; + return (bcl_levels > 2) ? 
(bcl_levels - 2) : 0; } /* @@ -6244,28 +6251,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm) if (tp_features.bright_unkfw) return 1; - if (tp_features.bright_acpimode) { - if (acpi_video_backlight_support()) { - if (brightness_enable > 1) { - printk(TPACPI_NOTICE - "Standard ACPI backlight interface " - "available, not loading native one.\n"); - return 1; - } else if (brightness_enable == 1) { - printk(TPACPI_NOTICE - "Backlight control force enabled, even if standard " - "ACPI backlight interface is available\n"); - } - } else { - if (brightness_enable > 1) { - printk(TPACPI_NOTICE - "Standard ACPI backlight interface not " - "available, thinkpad_acpi native " - "brightness control enabled\n"); - } - } - } - if (!brightness_enable) { dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, "brightness support disabled by " @@ -6273,6 +6258,26 @@ static int __init brightness_init(struct ibm_init_struct *iibm) return 1; } + if (acpi_video_backlight_support()) { + if (brightness_enable > 1) { + printk(TPACPI_INFO + "Standard ACPI backlight interface " + "available, not loading native one.\n"); + return 1; + } else if (brightness_enable == 1) { + printk(TPACPI_WARN + "Cannot enable backlight brightness support, " + "ACPI is already handling it. Refer to the " + "acpi_backlight kernel parameter\n"); + return 1; + } + } else if (tp_features.bright_acpimode && brightness_enable > 1) { + printk(TPACPI_NOTICE + "Standard ACPI backlight interface not " + "available, thinkpad_acpi native " + "brightness control enabled\n"); + } + /* * Check for module parameter bogosity, note that we * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c index c6cbcb3f925..0e9a309b966 100644 --- a/drivers/s390/char/ctrlchar.c +++ b/drivers/s390/char/ctrlchar.c @@ -16,12 +16,11 @@ #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; -static struct tty_struct *sysrq_tty; static void ctrlchar_handle_sysrq(struct work_struct *work) { - handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); + handle_sysrq(ctrlchar_sysrq_key); } static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); @@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; - sysrq_tty = tty; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 18d9a497863..8cd58e412b5 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode) if (kbd->sysrq) { if (kbd->sysrq == K(KT_LATIN, '-')) { kbd->sysrq = 0; - handle_sysrq(value, kbd->tty); + handle_sysrq(value); return; } if (value == '-') { diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 6466231f338..bbf91aec64f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -370,6 +370,14 @@ config ISCSI_TCP http://open-iscsi.org +config ISCSI_BOOT_SYSFS + tristate "iSCSI Boot Sysfs Interface" + default n + help + This option enables support for exposing iSCSI boot information + via sysfs to userspace. If you wish to export this information, + say Y. Otherwise, say N. 
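The brightness_init() hunk above moves the backlight-ownership decision out of the bright_acpimode special case: when the standard ACPI video interface is available, thinkpad_acpi now backs off (and still backs off, with a warning, when forced via brightness_enable=1); native control is only kept when ACPI video cannot handle the backlight. A condensed sketch of that decision; returning nonzero here plays the role of the driver's "return 1" (do not register the native backlight), and the printed strings are paraphrased.

    #include <stdio.h>

    /* Stand-in for acpi_video_backlight_support(); value is illustrative. */
    static int acpi_video_backlight_support(void) { return 1; }

    static int brightness_decide(unsigned int brightness_enable, int bright_acpimode)
    {
        if (!brightness_enable)
            return 1;   /* disabled by module parameter */

        if (acpi_video_backlight_support()) {
            if (brightness_enable > 1)
                printf("ACPI video handles the backlight, not loading native control\n");
            else
                printf("cannot force native control, ACPI is already handling it\n");
            return 1;   /* back off either way */
        }

        if (bright_acpimode && brightness_enable > 1)
            printf("no standard ACPI backlight interface, enabling native control\n");

        return 0;       /* go on and register the native backlight */
    }

    int main(void)
    {
        printf("skip native backlight: %d\n", brightness_decide(2 /* auto */, 1));
        return 0;
    }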
+ source "drivers/scsi/cxgb3i/Kconfig" source "drivers/scsi/bnx2i/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 2a3fca2eca6..2703c6ec5e3 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o +obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 95a895dd4f1..c8dc392edd5 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -56,6 +56,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/timer.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/aer.h> #include <asm/dma.h> diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig index 84c275fb9f6..ceaca32e788 100644 --- a/drivers/scsi/be2iscsi/Kconfig +++ b/drivers/scsi/be2iscsi/Kconfig @@ -2,6 +2,7 @@ config BE2ISCSI tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2" depends on PCI && SCSI && NET select SCSI_ISCSI_ATTRS + select ISCSI_BOOT_SYSFS help This driver implements the iSCSI functionality for ServerEngines' diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 40641d0845f..5218de4ab35 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -162,6 +162,13 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2 #define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3 #define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7 +#define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN 14 +#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR 17 +#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR 21 +#define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY 22 +#define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23 +#define OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID 24 +#define OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO 25 #define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61 #define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64 #define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65 @@ -237,11 +244,109 @@ struct be_cmd_resp_eq_create { u16 rsvd0; /* sword */ } __packed; +struct mgmt_chap_format { + u32 flags; + u8 intr_chap_name[256]; + u8 intr_secret[16]; + u8 target_chap_name[256]; + u8 target_secret[16]; + u16 intr_chap_name_length; + u16 intr_secret_length; + u16 target_chap_name_length; + u16 target_secret_length; +} __packed; + +struct mgmt_auth_method_format { + u8 auth_method_type; + u8 padding[3]; + struct mgmt_chap_format chap; +} __packed; + +struct mgmt_conn_login_options { + u8 flags; + u8 header_digest; + u8 data_digest; + u8 rsvd0; + u32 max_recv_datasegment_len_ini; + u32 max_recv_datasegment_len_tgt; + u32 tcp_mss; + u32 tcp_window_size; + struct mgmt_auth_method_format auth_data; +} __packed; + +struct ip_address_format { + u16 size_of_structure; + u8 reserved; + u8 ip_type; + u8 ip_address[16]; + u32 rsvd0; +} __packed; + +struct mgmt_conn_info { + u32 connection_handle; + u32 connection_status; + u16 src_port; + u16 dest_port; + u16 dest_port_redirected; + u16 cid; + u32 estimated_throughput; + struct ip_address_format src_ipaddr; + struct ip_address_format dest_ipaddr; + struct ip_address_format 
dest_ipaddr_redirected; + struct mgmt_conn_login_options negotiated_login_options; +} __packed; + +struct mgmt_session_login_options { + u8 flags; + u8 error_recovery_level; + u16 rsvd0; + u32 first_burst_length; + u32 max_burst_length; + u16 max_connections; + u16 max_outstanding_r2t; + u16 default_time2wait; + u16 default_time2retain; +} __packed; + +struct mgmt_session_info { + u32 session_handle; + u32 status; + u8 isid[6]; + u16 tsih; + u32 session_flags; + u16 conn_count; + u16 pad; + u8 target_name[224]; + u8 initiator_iscsiname[224]; + struct mgmt_session_login_options negotiated_login_options; + struct mgmt_conn_info conn_list[1]; +} __packed; + +struct be_cmd_req_get_session { + struct be_cmd_req_hdr hdr; + u32 session_handle; +} __packed; + +struct be_cmd_resp_get_session { + struct be_cmd_resp_hdr hdr; + struct mgmt_session_info session_info; +} __packed; + struct mac_addr { u16 size_of_struct; u8 addr[ETH_ALEN]; } __packed; +struct be_cmd_req_get_boot_target { + struct be_cmd_req_hdr hdr; +} __packed; + +struct be_cmd_resp_get_boot_target { + struct be_cmd_resp_hdr hdr; + u32 boot_session_count; + int boot_session_handle; +}; + struct be_cmd_req_mac_query { struct be_cmd_req_hdr hdr; u8 type; @@ -426,6 +531,11 @@ int be_poll_mcc(struct be_ctrl_info *ctrl); int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba); +unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba); +unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba, + u32 boot_session_handle, + struct be_dma_mem *nonemb_cmd); + void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); @@ -601,14 +711,6 @@ struct be_eq_delay_params_in { struct eq_delay delay[8]; } __packed; -struct ip_address_format { - u16 size_of_structure; - u8 reserved; - u8 ip_type; - u8 ip_address[16]; - u32 rsvd0; -} __packed; - struct tcp_connect_and_offload_in { struct be_cmd_req_hdr hdr; struct ip_address_format ip_address; @@ -688,18 +790,29 @@ struct be_fw_cfg { u32 function_caps; } __packed; -#define CMD_ISCSI_COMMAND_INVALIDATE 1 -#define ISCSI_OPCODE_SCSI_DATA_OUT 5 +struct be_all_if_id { + struct be_cmd_req_hdr hdr; + u32 if_count; + u32 if_hndl_list[1]; +} __packed; + +#define ISCSI_OPCODE_SCSI_DATA_OUT 5 +#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 +#define OPCODE_COMMON_ISCSI_CLEANUP 59 +#define OPCODE_COMMON_TCP_UPLOAD 56 #define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70 -#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41 -#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 -#define OPCODE_COMMON_ISCSI_CLEANUP 59 -#define OPCODE_COMMON_TCP_UPLOAD 56 #define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1 -/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ -#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001 -#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002 +#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6 +#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7 +#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14 +#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 +#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52 + +/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */ +#define CMD_ISCSI_COMMAND_INVALIDATE 1 +#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001 +#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002 #define INI_WR_CMD 1 /* Initiator write command */ #define INI_TMF_CMD 2 /* Initiator TMF command */ diff --git 
a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 6d63e7b312c..7d4d2275573 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -300,40 +300,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); - struct be_cmd_resp_get_mac_addr *resp; - struct be_mcc_wrb *wrb; - unsigned int tag, wrb_num; int len = 0; - unsigned short status, extd_status; - struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + int status; SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: - tag = be_cmd_get_mac_addr(phba); - if (!tag) { - SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n"); - return -EAGAIN; - } else - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); - - wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; - extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; - status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; - if (status || extd_status) { - SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed" - " status = %d extd_status = %d\n", - status, extd_status); - free_mcc_tag(&phba->ctrl, tag); - return -EAGAIN; - } else { - wrb = queue_get_wrb(mccq, wrb_num); - free_mcc_tag(&phba->ctrl, tag); - resp = embedded_payload(wrb); - memcpy(phba->mac_address, resp->mac_address, ETH_ALEN); - len = sysfs_format_mac(buf, phba->mac_address, - ETH_ALEN); + status = beiscsi_get_macaddr(buf, phba); + if (status < 0) { + SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n"); + return status; } break; default: @@ -342,6 +318,48 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, return len; } +int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) +{ + struct be_cmd_resp_get_mac_addr *resp; + struct be_mcc_wrb *wrb; + unsigned int tag, wrb_num; + unsigned short status, extd_status; + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + int rc; + + if (phba->read_mac_address) + return sysfs_format_mac(buf, phba->mac_address, + ETH_ALEN); + + tag = be_cmd_get_mac_addr(phba); + if (!tag) { + SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n"); + return -EBUSY; + } else + wait_event_interruptible(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag]); + + wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; + extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; + status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; + if (status || extd_status) { + SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr" + " status = %d extd_status = %d\n", + status, extd_status); + free_mcc_tag(&phba->ctrl, tag); + return -EAGAIN; + } + wrb = queue_get_wrb(mccq, wrb_num); + free_mcc_tag(&phba->ctrl, tag); + resp = embedded_payload(wrb); + memcpy(phba->mac_address, resp->mac_address, ETH_ALEN); + rc = sysfs_format_mac(buf, phba->mac_address, + ETH_ALEN); + phba->read_mac_address = 1; + return rc; +} + + /** * beiscsi_conn_get_stats - get the iscsi stats * @cls_conn: pointer to iscsi cls conn diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 870cdb2a73e..8950a702b9f 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -54,6 +54,8 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); +int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba); + int 
beiscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 7436c5ad569..8220bde6c04 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -26,6 +26,7 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/semaphore.h> +#include <linux/iscsi_boot_sysfs.h> #include <scsi/libiscsi.h> #include <scsi/scsi_transport_iscsi.h> @@ -211,6 +212,218 @@ unlock: return rc; } +static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = sprintf(buf, "%.*s\n", + (int)strlen(phba->boot_sess.target_name), + (char *)&phba->boot_sess.target_name); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1) + rc = sprintf(buf, "%pI4\n", + (char *)&phba->boot_sess.conn_list[0]. + dest_ipaddr.ip_address); + else + rc = sprintf(str, "%pI6\n", + (char *)&phba->boot_sess.conn_list[0]. + dest_ipaddr.ip_address); + break; + case ISCSI_BOOT_TGT_PORT: + rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0]. + dest_port); + break; + + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + target_chap_name_length, + (char *)&phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + target_chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + target_secret_length, + (char *)&phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + target_secret); + + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + intr_chap_name_length, + (char *)&phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + intr_chap_name); + + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. + intr_secret_length, + (char *)&phba->boot_sess.conn_list[0]. + negotiated_login_options.auth_data.chap. 
+ intr_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = sprintf(str, "2\n"); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = sprintf(str, "0\n"); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) +{ + struct beiscsi_hba *phba = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + rc = sprintf(str, "2\n"); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = sprintf(str, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = beiscsi_get_macaddr(buf, phba); + if (rc < 0) { + SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n"); + return rc; + } + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + + +static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_FLAGS: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static mode_t beiscsi_ini_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + + +static mode_t beiscsi_eth_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) +{ + struct iscsi_boot_kobj *boot_kobj; + + phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); + if (!phba->boot_kset) + return -ENOMEM; + + /* get boot info using mgmt cmd */ + boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, + beiscsi_show_boot_tgt_info, + beiscsi_tgt_get_attr_visibility); + if (!boot_kobj) + goto free_kset; + + boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, + beiscsi_show_boot_ini_info, + beiscsi_ini_get_attr_visibility); + if (!boot_kobj) + goto free_kset; + + boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, + beiscsi_show_boot_eth_info, + beiscsi_eth_get_attr_visibility); + if (!boot_kobj) + goto free_kset; + return 0; + +free_kset: + iscsi_boot_destroy_kset(phba->boot_kset); + return -ENOMEM; +} + /*------------------- PCI Driver operations and data ----------------- */ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, @@ -268,6 +481,15 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) if (iscsi_host_add(shost, &phba->pcidev->dev)) goto free_devices; + + if (beiscsi_setup_boot_info(phba)) + /* + * log error but continue, because we may not be using + * iscsi boot. 
+ */ + shost_printk(KERN_ERR, phba->shost, "Could not set up " + "iSCSI boot info."); + return phba; free_devices: @@ -3279,6 +3501,89 @@ static void hwi_disable_intr(struct beiscsi_hba *phba) "In hwi_disable_intr, Already Disabled\n"); } +static int beiscsi_get_boot_info(struct beiscsi_hba *phba) +{ + struct be_cmd_resp_get_boot_target *boot_resp; + struct be_cmd_resp_get_session *session_resp; + struct be_mcc_wrb *wrb; + struct be_dma_mem nonemb_cmd; + unsigned int tag, wrb_num; + unsigned short status, extd_status; + struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; + + tag = beiscsi_get_boot_target(phba); + if (!tag) { + SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n"); + return -EAGAIN; + } else + wait_event_interruptible(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag]); + + wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; + extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; + status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; + if (status || extd_status) { + SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed" + " status = %d extd_status = %d\n", + status, extd_status); + free_mcc_tag(&phba->ctrl, tag); + return -EBUSY; + } + wrb = queue_get_wrb(mccq, wrb_num); + free_mcc_tag(&phba->ctrl, tag); + boot_resp = embedded_payload(wrb); + + if (boot_resp->boot_session_handle < 0) { + printk(KERN_ERR "No Boot Session for this pci_func," + "session Hndl = %d\n", boot_resp->boot_session_handle); + return -ENXIO; + } + + nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, + sizeof(*session_resp), + &nonemb_cmd.dma); + if (nonemb_cmd.va == NULL) { + SE_DEBUG(DBG_LVL_1, + "Failed to allocate memory for" + "beiscsi_get_session_info\n"); + return -ENOMEM; + } + + memset(nonemb_cmd.va, 0, sizeof(*session_resp)); + tag = beiscsi_get_session_info(phba, + boot_resp->boot_session_handle, &nonemb_cmd); + if (!tag) { + SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info" + " Failed\n"); + goto boot_freemem; + } else + wait_event_interruptible(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag]); + + wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; + extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; + status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; + if (status || extd_status) { + SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed" + " status = %d extd_status = %d\n", + status, extd_status); + free_mcc_tag(&phba->ctrl, tag); + goto boot_freemem; + } + wrb = queue_get_wrb(mccq, wrb_num); + free_mcc_tag(&phba->ctrl, tag); + session_resp = nonemb_cmd.va ; + memcpy(&phba->boot_sess, &session_resp->session_info, + sizeof(struct mgmt_session_info)); + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return 0; +boot_freemem: + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return -ENOMEM; +} + static int beiscsi_init_port(struct beiscsi_hba *phba) { int ret; @@ -3841,6 +4146,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + iscsi_boot_destroy_kset(phba->boot_kset); } static void beiscsi_msix_enable(struct beiscsi_hba *phba) @@ -3996,6 +4302,11 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, goto free_blkenbld; } hwi_enable_intr(phba); + ret = beiscsi_get_boot_info(phba); + if (ret < 0) { + shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" + "No Boot Devices !!!!!\n"); + } SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n"); return 0; diff --git 
a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index c643bb3736f..90eb74f6bca 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -35,7 +35,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "2.0.527.0" +#define BUILD_STR "2.0.549.0" #define BE_NAME "ServerEngines BladeEngine2" \ "Linux iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -63,7 +63,7 @@ #define BEISCSI_SGLIST_ELEMENTS 30 #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */ +#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ @@ -312,6 +312,7 @@ struct beiscsi_hba { struct list_head hba_queue; unsigned short *cid_array; struct iscsi_endpoint **ep_array; + struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; struct { /** @@ -342,6 +343,8 @@ struct beiscsi_hba { struct work_struct work_cqs; /* The work being queued */ struct be_ctrl_info ctrl; unsigned int generation; + unsigned int read_mac_address; + struct mgmt_session_info boot_sess; struct invalidate_command_table inv_tbl[128]; }; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 3f3fab91a7d..26350e470bc 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -20,6 +20,77 @@ #include "be_mgmt.h" #include "be_iscsi.h" +#include <scsi/scsi_transport_iscsi.h> + +unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_mac_addr *req; + unsigned int tag = 0; + + SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n"); + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + return tag; + } + + wrb = wrb_from_mccq(phba); + req = embedded_payload(wrb); + wrb->tag0 |= tag; + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET, + sizeof(*req)); + + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + return tag; +} + +unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba, + u32 boot_session_handle, + struct be_dma_mem *nonemb_cmd) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + unsigned int tag = 0; + struct be_cmd_req_get_session *req; + struct be_cmd_resp_get_session *resp; + struct be_sge *sge; + + SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n"); + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + return tag; + } + + nonemb_cmd->size = sizeof(*resp); + req = nonemb_cmd->va; + memset(req, 0, sizeof(*req)); + wrb = wrb_from_mccq(phba); + sge = nonembedded_sgl(wrb); + wrb->tag0 |= tag; + + + wrb->tag0 |= tag; + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, + sizeof(*resp)); + req->session_handle = boot_session_handle; + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd->size); + + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + return tag; +} int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c 
b/drivers/scsi/ibmvscsi/ibmvfc.c index bd96cecaa61..9f75a6d519a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -433,6 +433,9 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, { switch (tgt->action) { case IBMVFC_TGT_ACTION_DEL_RPORT: + if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) + tgt->action = action; + case IBMVFC_TGT_ACTION_DELETED_RPORT: break; default: if (action == IBMVFC_TGT_ACTION_DEL_RPORT) @@ -2036,95 +2039,108 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) } /** - * ibmvfc_abort_task_set - Abort outstanding commands to the device - * @sdev: scsi device to abort commands - * - * This sends an Abort Task Set to the VIOS for the specified device. This does - * NOT send any cancel to the VIOS. That must be done separately. + * ibmvfc_match_rport - Match function for specified remote port + * @evt: ibmvfc event struct + * @device: device to match (rport) * * Returns: - * 0 on success / other on failure + * 1 if event matches rport / 0 if event does not match rport **/ -static int ibmvfc_abort_task_set(struct scsi_device *sdev) +static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport) { - struct ibmvfc_host *vhost = shost_priv(sdev->host); - struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - struct ibmvfc_cmd *tmf; - struct ibmvfc_event *evt, *found_evt; - union ibmvfc_iu rsp_iu; - struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; - int rsp_rc = -EBUSY; - unsigned long flags; - int rsp_code = 0; + struct fc_rport *cmd_rport; - spin_lock_irqsave(vhost->host->host_lock, flags); - found_evt = NULL; - list_for_each_entry(evt, &vhost->sent, queue) { - if (evt->cmnd && evt->cmnd->device == sdev) { - found_evt = evt; - break; - } - } - - if (!found_evt) { - if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) - sdev_printk(KERN_INFO, sdev, "No events found to abort\n"); - spin_unlock_irqrestore(vhost->host->host_lock, flags); - return 0; - } - - if (vhost->state == IBMVFC_ACTIVE) { - evt = ibmvfc_get_event(vhost); - ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); - - tmf = &evt->iu.cmd; - memset(tmf, 0, sizeof(*tmf)); - tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); - tmf->resp.len = sizeof(tmf->rsp); - tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; - tmf->payload_len = sizeof(tmf->iu); - tmf->resp_len = sizeof(tmf->rsp); - tmf->cancel_key = (unsigned long)sdev->hostdata; - tmf->tgt_scsi_id = rport->port_id; - int_to_scsilun(sdev->lun, &tmf->iu.lun); - tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); - tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; - evt->sync_iu = &rsp_iu; - - init_completion(&evt->comp); - rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + if (evt->cmnd) { + cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device)); + if (cmd_rport == rport) + return 1; } + return 0; +} - spin_unlock_irqrestore(vhost->host->host_lock, flags); +/** + * ibmvfc_match_target - Match function for specified target + * @evt: ibmvfc event struct + * @device: device to match (starget) + * + * Returns: + * 1 if event matches starget / 0 if event does not match starget + **/ +static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device) +{ + if (evt->cmnd && scsi_target(evt->cmnd->device) == device) + return 1; + return 0; +} - if (rsp_rc != 0) { - sdev_printk(KERN_ERR, sdev, "Failed to send abort. 
rc=%d\n", rsp_rc); - return -EIO; - } +/** + * ibmvfc_match_lun - Match function for specified LUN + * @evt: ibmvfc event struct + * @device: device to match (sdev) + * + * Returns: + * 1 if event matches sdev / 0 if event does not match sdev + **/ +static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) +{ + if (evt->cmnd && evt->cmnd->device == device) + return 1; + return 0; +} - sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); - wait_for_completion(&evt->comp); +/** + * ibmvfc_wait_for_ops - Wait for ops to complete + * @vhost: ibmvfc host struct + * @device: device to match (starget or sdev) + * @match: match function + * + * Returns: + * SUCCESS / FAILED + **/ +static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, + int (*match) (struct ibmvfc_event *, void *)) +{ + struct ibmvfc_event *evt; + DECLARE_COMPLETION_ONSTACK(comp); + int wait; + unsigned long flags; + signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; - if (rsp_iu.cmd.status) - rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + ENTER; + do { + wait = 0; + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(evt, &vhost->sent, queue) { + if (match(evt, device)) { + evt->eh_comp = ∁ + wait++; + } + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); - if (rsp_code) { - if (fc_rsp->flags & FCP_RSP_LEN_VALID) - rsp_code = fc_rsp->data.info.rsp_code; + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); - sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " - "flags: %x fcp_rsp: %x, scsi_status: %x\n", - ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), - rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, - fc_rsp->scsi_status); - rsp_rc = -EIO; - } else - sdev_printk(KERN_INFO, sdev, "Abort successful\n"); + if (!timeout) { + wait = 0; + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(evt, &vhost->sent, queue) { + if (match(evt, device)) { + evt->eh_comp = NULL; + wait++; + } + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (wait) + dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); + LEAVE; + return wait ? 
FAILED : SUCCESS; + } + } + } while (wait); - spin_lock_irqsave(vhost->host->host_lock, flags); - ibmvfc_free_event(evt); - spin_unlock_irqrestore(vhost->host->host_lock, flags); - return rsp_rc; + LEAVE; + return SUCCESS; } /** @@ -2212,88 +2228,130 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) } /** - * ibmvfc_match_target - Match function for specified target + * ibmvfc_match_key - Match function for specified cancel key * @evt: ibmvfc event struct - * @device: device to match (starget) + * @key: cancel key to match * * Returns: - * 1 if event matches starget / 0 if event does not match starget + * 1 if event matches key / 0 if event does not match key **/ -static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device) +static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key) { - if (evt->cmnd && scsi_target(evt->cmnd->device) == device) - return 1; - return 0; -} + unsigned long cancel_key = (unsigned long)key; -/** - * ibmvfc_match_lun - Match function for specified LUN - * @evt: ibmvfc event struct - * @device: device to match (sdev) - * - * Returns: - * 1 if event matches sdev / 0 if event does not match sdev - **/ -static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) -{ - if (evt->cmnd && evt->cmnd->device == device) + if (evt->crq.format == IBMVFC_CMD_FORMAT && + evt->iu.cmd.cancel_key == cancel_key) return 1; return 0; } /** - * ibmvfc_wait_for_ops - Wait for ops to complete - * @vhost: ibmvfc host struct - * @device: device to match (starget or sdev) - * @match: match function + * ibmvfc_abort_task_set - Abort outstanding commands to the device + * @sdev: scsi device to abort commands + * + * This sends an Abort Task Set to the VIOS for the specified device. This does + * NOT send any cancel to the VIOS. That must be done separately. 
* * Returns: - * SUCCESS / FAILED + * 0 on success / other on failure **/ -static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, - int (*match) (struct ibmvfc_event *, void *)) +static int ibmvfc_abort_task_set(struct scsi_device *sdev) { - struct ibmvfc_event *evt; - DECLARE_COMPLETION_ONSTACK(comp); - int wait; - unsigned long flags; - signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_cmd *tmf; + struct ibmvfc_event *evt, *found_evt; + union ibmvfc_iu rsp_iu; + struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; + int rc, rsp_rc = -EBUSY; + unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT; + int rsp_code = 0; - ENTER; - do { - wait = 0; - spin_lock_irqsave(vhost->host->host_lock, flags); - list_for_each_entry(evt, &vhost->sent, queue) { - if (match(evt, device)) { - evt->eh_comp = ∁ - wait++; - } + spin_lock_irqsave(vhost->host->host_lock, flags); + found_evt = NULL; + list_for_each_entry(evt, &vhost->sent, queue) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; } + } + + if (!found_evt) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to abort\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } - if (wait) { - timeout = wait_for_completion_timeout(&comp, timeout); + if (vhost->state == IBMVFC_ACTIVE) { + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); - if (!timeout) { - wait = 0; - spin_lock_irqsave(vhost->host->host_lock, flags); - list_for_each_entry(evt, &vhost->sent, queue) { - if (match(evt, device)) { - evt->eh_comp = NULL; - wait++; - } - } - spin_unlock_irqrestore(vhost->host->host_lock, flags); - if (wait) - dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); - LEAVE; - return wait ? FAILED : SUCCESS; - } + tmf = &evt->iu.cmd; + memset(tmf, 0, sizeof(*tmf)); + tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); + tmf->resp.len = sizeof(tmf->rsp); + tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; + tmf->payload_len = sizeof(tmf->iu); + tmf->resp_len = sizeof(tmf->rsp); + tmf->cancel_key = (unsigned long)sdev->hostdata; + tmf->tgt_scsi_id = rport->port_id; + int_to_scsilun(sdev->lun, &tmf->iu.lun); + tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); + tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; + evt->sync_iu = &rsp_iu; + + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send abort. 
rc=%d\n", rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); + timeout = wait_for_completion_timeout(&evt->comp, timeout); + + if (!timeout) { + rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + if (!rc) { + rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); + if (rc == SUCCESS) + rc = 0; } - } while (wait); - LEAVE; - return SUCCESS; + if (rc) { + sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n"); + ibmvfc_reset_host(vhost); + rsp_rc = 0; + goto out; + } + } + + if (rsp_iu.cmd.status) + rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + + if (rsp_code) { + if (fc_rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = fc_rsp->data.info.rsp_code; + + sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, scsi_status: %x\n", + ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), + rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + fc_rsp->scsi_status); + rsp_rc = -EIO; + } else + sdev_printk(KERN_INFO, sdev, "Abort successful\n"); + +out: + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rsp_rc; } /** @@ -2351,18 +2409,6 @@ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) } /** - * ibmvfc_dev_cancel_all_abts - Device iterated cancel all function - * @sdev: scsi device struct - * @data: return code - * - **/ -static void ibmvfc_dev_cancel_all_abts(struct scsi_device *sdev, void *data) -{ - unsigned long *rc = data; - *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); -} - -/** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code @@ -2375,18 +2421,6 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) } /** - * ibmvfc_dev_abort_all - Device iterated abort task set function - * @sdev: scsi device struct - * @data: return code - * - **/ -static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data) -{ - unsigned long *rc = data; - *rc |= ibmvfc_abort_task_set(sdev); -} - -/** * ibmvfc_eh_target_reset_handler - Reset the target * @cmd: scsi command struct * @@ -2440,19 +2474,22 @@ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) **/ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) { - struct scsi_target *starget = to_scsi_target(&rport->dev); - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct Scsi_Host *shost = rport_to_shost(rport); struct ibmvfc_host *vhost = shost_priv(shost); - unsigned long cancel_rc = 0; - unsigned long abort_rc = 0; - int rc = FAILED; + struct fc_rport *dev_rport; + struct scsi_device *sdev; + unsigned long rc; ENTER; - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_abts); - starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); + shost_for_each_device(sdev, shost) { + dev_rport = starget_to_rport(scsi_target(sdev)); + if (dev_rport != rport) + continue; + ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } - if (!cancel_rc && !abort_rc) - rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); if (rc == FAILED) ibmvfc_issue_fc_host_lip(shost); @@ -4193,11 +4230,15 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { tgt_dbg(tgt, "Deleting rport\n"); list_del(&tgt->queue); + 
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); fc_remote_port_delete(rport); del_timer_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; + } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; } if (rport) { @@ -4297,6 +4338,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) rport = tgt->rport; tgt->rport = NULL; list_del(&tgt->queue); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index d7e8dcd9065..608af394c8c 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.8" -#define IBMVFC_DRIVER_DATE "(June 17, 2010)" +#define IBMVFC_DRIVER_VERSION "1.0.9" +#define IBMVFC_DRIVER_DATE "(August 5, 2010)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -38,6 +38,7 @@ #define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \ (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT) #define IBMVFC_INIT_TIMEOUT 120 +#define IBMVFC_ABORT_TIMEOUT 8 #define IBMVFC_ABORT_WAIT_TIMEOUT 40 #define IBMVFC_MAX_REQUESTS_DEFAULT 100 @@ -597,6 +598,7 @@ enum ibmvfc_target_action { IBMVFC_TGT_ACTION_INIT, IBMVFC_TGT_ACTION_INIT_WAIT, IBMVFC_TGT_ACTION_DEL_RPORT, + IBMVFC_TGT_ACTION_DELETED_RPORT, }; struct ibmvfc_target { diff --git a/drivers/firmware/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index df6bff7366c..df6bff7366c 100644 --- a/drivers/firmware/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index eac4d09314e..c797f6b48f0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1765,14 +1765,14 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) struct fcoe_dev_stats *stats; lport = shost_priv(sc_cmd->device->host); - spin_unlock_irq(lport->host->host_lock); rval = fc_remote_port_chkready(rport); if (rval) { sc_cmd->result = rval; done(sc_cmd); - goto out; + return 0; } + spin_unlock_irq(lport->host->host_lock); if (!*(struct fc_remote_port **)rport->dd_data) { /* diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 3482d5a5aed..a50aa03b8ac 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -775,6 +775,7 @@ struct lpfc_hba { uint8_t temp_sensor_support; /* Fields used for heart beat. */ unsigned long last_completion_time; + unsigned long skipped_hb; struct timer_list hb_tmofunc; uint8_t hb_outstanding; enum hba_temp_state over_temp_state; @@ -817,6 +818,8 @@ struct lpfc_hba { uint32_t iocb_cnt; uint32_t iocb_max; atomic_t sdev_cnt; + uint8_t fips_spec_rev; + uint8_t fips_level; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ad05b266e95..23ce4570833 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1240,6 +1240,44 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, } /** + * lpfc_fips_level_show - Return the current FIPS level for the HBA + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fips_level_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level); +} + +/** + * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev); +} + +/** * lpfc_param_show - Return a cfg attribute value in decimal * * Description: @@ -1677,6 +1715,8 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL); static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL); static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); +static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); +static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -3278,7 +3318,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # - Default will result in registering capabilities for all profiles. # */ -unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION; +unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; module_param(lpfc_prot_mask, uint, 0); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); @@ -3383,6 +3423,8 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_iocb_hw, &dev_attr_txq_hw, &dev_attr_txcmplq_hw, + &dev_attr_lpfc_fips_level, + &dev_attr_lpfc_fips_rev, NULL, }; @@ -3409,6 +3451,8 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, &dev_attr_lpfc_static_vport, + &dev_attr_lpfc_fips_level, + &dev_attr_lpfc_fips_rev, NULL, }; diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index d521569e662..49d0cf99c24 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2724,15 +2724,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, pmboxq->context2 = ext; pmboxq->in_ext_byte_len = - mbox_req->inExtWLen * - sizeof(uint32_t); - pmboxq->out_ext_byte_len = - mbox_req->outExtWLen * - sizeof(uint32_t); - pmboxq->mbox_offset_word = - mbox_req->mbOffset; - pmboxq->context2 = ext; - pmboxq->in_ext_byte_len = mbox_req->inExtWLen * sizeof(uint32_t); pmboxq->out_ext_byte_len = mbox_req->outExtWLen * sizeof(uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index a11f1ae7b98..75e2e569ded 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h @@ -82,8 +82,7 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes) static inline void lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes) { - /* actually returns 1 byte past dest */ - memcpy_toio( dest, src, bytes); + __iowrite32_copy(dest, src, bytes); } static inline void diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index afbed6bc31f..8d09191c327 
100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -600,6 +600,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* + * Driver needs to re-reg VPI in order for f/w + * to update the MAC address. + */ + lpfc_register_new_vport(phba, vport, ndlp); + return 0; } if (phba->sli_rev < LPFC_SLI_REV4) { @@ -801,9 +809,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2611 FLOGI failed on registered " - "FCF record fcf_index:%d, trying " - "to perform round robin failover\n", - phba->fcf.current_rec.fcf_indx); + "FCF record fcf_index(%d), status: " + "x%x/x%x, tmo:x%x, trying to perform " + "round robin failover\n", + phba->fcf.current_rec.fcf_indx, + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout); fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { /* @@ -841,6 +852,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } } + /* FLOGI failure */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout); + /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; @@ -1291,6 +1308,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; uint32_t rc, keepDID = 0; + int put_node; + int put_rport; /* Fabric nodes can have the same WWPN so we don't bother searching * by WWPN. Just return the ndlp that was given to us. @@ -1379,6 +1398,28 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, /* Two ndlps cannot have the same did */ ndlp->nlp_DID = keepDID; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + /* Since we are swapping the ndlp passed in with the new one + * and the did has already been swapped, copy over the + * state and names. 
+ */ + memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname, + sizeof(struct lpfc_name)); + memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename, + sizeof(struct lpfc_name)); + new_ndlp->nlp_state = ndlp->nlp_state; + /* Fix up the rport accordingly */ + rport = ndlp->rport; + if (rport) { + rdata = rport->dd_data; + put_node = rdata->pnode != NULL; + put_rport = ndlp->rport != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + if (put_rport) + put_device(&rport->dev); + } } return new_ndlp; } @@ -2880,6 +2921,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, retry = 0; if (retry) { + if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { + /* Stop retrying PLOGI and FDISC if in FCF discovery */ + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2849 Stop retry ELS command " + "x%x to remote NPORT x%x, " + "Data: x%x x%x\n", cmd, did, + cmdiocb->retry, delay); + return 0; + } + } /* Retry ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -6076,8 +6128,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "0915 Register VPI failed: 0x%x\n", - mb->mbxStatus); + "0915 Register VPI failed : Status: x%x" + " upd bit: x%x \n", mb->mbxStatus, + mb->un.varRegVpi.upd); + if (phba->sli_rev == LPFC_SLI_REV4 && + mb->un.varRegVpi.upd) + goto mbox_err_exit ; switch (mb->mbxStatus) { case 0x11: /* unsupported feature */ @@ -6142,7 +6198,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else lpfc_do_scr_ns_plogi(phba, vport); } - +mbox_err_exit: /* Now, we decrement the ndlp reference count held for this * callback function */ @@ -6387,6 +6443,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; spin_unlock_irq(shost->host_lock); + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { + /* + * Driver needs to re-reg VPI in order for f/w + * to update the MAC address. 
+ */ + lpfc_register_new_vport(phba, vport, ndlp); + return ; } if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0639c994349..1f62ea8c165 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -588,7 +588,7 @@ lpfc_work_done(struct lpfc_hba *phba) (status & HA_RXMASK)); } - if (pring->txq_cnt) + if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) lpfc_drain_txq(phba); /* * Turn on Ring interrupts @@ -1852,8 +1852,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* If in fast failover, mark it's completed */ - phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | - FCF_DISCOVERY); + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 The new FCF record (x%x) " @@ -2651,7 +2650,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2778 Start FCF table scan at linkup\n"); - rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) { @@ -2660,6 +2658,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); goto out; } + /* Reset FCF roundrobin bmask for new discovery */ + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); } return; @@ -5097,6 +5098,7 @@ static void lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (mboxq->u.mb.mbxStatus) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, @@ -5104,6 +5106,9 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); } + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); mempool_free(mboxq, phba->mbox_mem_pool); return; } @@ -5285,6 +5290,10 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); + + /* Reset FCF roundrobin bmask for new discovery */ + memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) { diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index f5dbf2be3ea..1676f61291e 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -2291,7 +2291,8 @@ typedef struct { typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t rsvd1; - uint32_t rsvd2:8; + uint32_t rsvd2:7; + uint32_t upd:1; uint32_t sid:24; uint32_t wwn[2]; uint32_t rsvd5; @@ -2300,7 +2301,8 @@ typedef struct { #else /* __LITTLE_ENDIAN */ uint32_t rsvd1; uint32_t sid:24; - uint32_t rsvd2:8; + uint32_t upd:1; + uint32_t rsvd2:7; uint32_t wwn[2]; uint32_t rsvd5; uint16_t vpi; @@ -2806,11 +2808,15 @@ typedef struct { uint32_t rsvd6; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd7 : 16; /* Reserved */ + uint32_t fips_rev : 3; /* FIPS Spec Revision */ + uint32_t fips_level : 4; /* FIPS Level */ + uint32_t sec_err : 9; /* security crypto error */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - 
uint32_t rsvd7 : 16; /* Reserved */ + uint32_t sec_err : 9; /* security crypto error */ + uint32_t fips_level : 4; /* FIPS Level */ + uint32_t fips_rev : 3; /* FIPS Spec Revision */ #endif } CONFIG_PORT_VAR; @@ -3441,63 +3447,63 @@ struct sli3_bg_fields { static inline uint32_t lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >> + return (bgstat & BGS_BIDIR_BG_PROF_MASK) >> BGS_BIDIR_BG_PROF_SHIFT; } static inline uint32_t lpfc_bgs_get_bidir_err_cond(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >> + return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >> BGS_BIDIR_ERR_COND_SHIFT; } static inline uint32_t lpfc_bgs_get_bg_prof(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >> + return (bgstat & BGS_BG_PROFILE_MASK) >> BGS_BG_PROFILE_SHIFT; } static inline uint32_t lpfc_bgs_get_invalid_prof(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >> + return (bgstat & BGS_INVALID_PROF_MASK) >> BGS_INVALID_PROF_SHIFT; } static inline uint32_t lpfc_bgs_get_uninit_dif_block(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >> + return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >> BGS_UNINIT_DIF_BLOCK_SHIFT; } static inline uint32_t lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >> + return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >> BGS_HI_WATER_MARK_PRESENT_SHIFT; } static inline uint32_t lpfc_bgs_get_reftag_err(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >> + return (bgstat & BGS_REFTAG_ERR_MASK) >> BGS_REFTAG_ERR_SHIFT; } static inline uint32_t lpfc_bgs_get_apptag_err(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >> + return (bgstat & BGS_APPTAG_ERR_MASK) >> BGS_APPTAG_ERR_SHIFT; } static inline uint32_t lpfc_bgs_get_guard_err(uint32_t bgstat) { - return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >> + return (bgstat & BGS_GUARD_ERR_MASK) >> BGS_GUARD_ERR_SHIFT; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 2786ee3b605..da9ba06ad58 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1032,27 +1032,46 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* If there is no heart beat outstanding, issue a heartbeat command */ if (phba->cfg_enable_hba_heartbeat) { if (!phba->hb_outstanding) { - pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); - if (!pmboxq) { - mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); - return; - } + if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && + (list_empty(&psli->mboxq))) { + pmboxq = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!pmboxq) { + mod_timer(&phba->hb_tmofunc, + jiffies + + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } - lpfc_heart_beat(phba, pmboxq); - pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; - pmboxq->vport = phba->pport; - retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); + lpfc_heart_beat(phba, pmboxq); + pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; + pmboxq->vport = phba->pport; + retval = lpfc_sli_issue_mbox(phba, pmboxq, + MBX_NOWAIT); + + if (retval != MBX_BUSY && + retval != MBX_SUCCESS) { + mempool_free(pmboxq, + phba->mbox_mem_pool); + mod_timer(&phba->hb_tmofunc, + jiffies + + HZ * LPFC_HB_MBOX_INTERVAL); + return; + } + phba->skipped_hb = 0; + phba->hb_outstanding = 1; + } else if (time_before_eq(phba->last_completion_time, + phba->skipped_hb)) { + lpfc_printf_log(phba, 
KERN_INFO, LOG_INIT, + "2857 Last completion time not " + " updated in %d ms\n", + jiffies_to_msecs(jiffies + - phba->last_completion_time)); + } else + phba->skipped_hb = jiffies; - if (retval != MBX_BUSY && retval != MBX_SUCCESS) { - mempool_free(pmboxq, phba->mbox_mem_pool); - mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); - return; - } mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); - phba->hb_outstanding = 1; return; } else { /* @@ -3281,10 +3300,10 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) if (!ndlp) return 0; } - if (phba->pport->port_state <= LPFC_FLOGI) + if (phba->pport->port_state < LPFC_FLOGI) return NULL; /* If virtual link is not yet instantiated ignore CVL */ - if (vport->port_state <= LPFC_FDISC) + if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) return NULL; shost = lpfc_shost_from_vport(vport); if (!shost) @@ -3357,21 +3376,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, "evt_tag:x%x, fcf_index:x%x\n", acqe_fcoe->event_tag, acqe_fcoe->index); - /* If the FCF discovery is in progress, do nothing. */ - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_DISC_INPROGRESS) { - spin_unlock_irq(&phba->hbalock); - break; - } - /* If fast FCF failover rescan event is pending, do nothing */ - if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { - spin_unlock_irq(&phba->hbalock); - break; - } - spin_unlock_irq(&phba->hbalock); - - if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && - !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { /* * During period of FCF discovery, read the FCF * table record indexed by the event to update @@ -3385,13 +3390,26 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, acqe_fcoe->index); rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); } - /* If the FCF has been in discovered state, do nothing. */ + + /* If the FCF discovery is in progress, do nothing. */ spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & FCF_DISC_INPROGRESS) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* If fast FCF failover rescan event is pending, do nothing */ + if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { + spin_unlock_irq(&phba->hbalock); + break; + } + + /* If the FCF has been in discovered state, do nothing. */ if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { spin_unlock_irq(&phba->hbalock); break; } spin_unlock_irq(&phba->hbalock); + /* Otherwise, scan the entire FCF table and re-discover SAN */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2770 Start FCF table scan due to new FCF " @@ -3417,13 +3435,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, "2549 FCF disconnected from network index 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); - /* If the event is not for currently used fcf do nothing */ - if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) - break; - /* We request port to rediscover the entire FCF table for - * a fast recovery from case that the current FCF record - * is no longer valid if we are not in the middle of FCF - * failover process already. + /* + * If we are in the middle of FCF failover process, clear + * the corresponding FCF bit in the roundrobin bitmap. 
*/ spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { @@ -3432,9 +3446,23 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); break; } + spin_unlock_irq(&phba->hbalock); + + /* If the event is not for currently used fcf do nothing */ + if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) + break; + + /* + * Otherwise, request the port to rediscover the entire FCF + * table for a fast recovery from case that the current FCF + * is no longer valid as we are not in the middle of FCF + * failover process already. + */ + spin_lock_irq(&phba->hbalock); /* Mark the fast failover process in progress */ phba->fcf.fcf_flag |= FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2771 Start FCF fast failover process due to " "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " @@ -3454,12 +3482,16 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * as a link down to FCF registration. */ lpfc_sli4_fcf_dead_failthrough(phba); - } else - /* Handling fast FCF failover to a DEAD FCF event - * is considered equalivant to receiving CVL to all - * vports. + } else { + /* Reset FCF roundrobin bmask for new discovery */ + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + /* + * Handling fast FCF failover to a DEAD FCF event is + * considered equalivant to receiving CVL to all vports. */ lpfc_sli4_perform_all_vport_cvl(phba); + } break; case LPFC_FCOE_EVENT_TYPE_CVL: lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, @@ -3534,7 +3566,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * the current registered FCF entry. */ lpfc_retry_pport_discovery(phba); - } + } else + /* + * Reset FCF roundrobin bmask for new + * discovery. + */ + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); } break; default: diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 9c2c7c7140c..0dfa310cd60 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -815,9 +815,15 @@ void lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_hba *phba = vport->phba; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - + /* + * Set the re-reg VPI bit for f/w to update the MAC address. 
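/*
 * The FCF DEAD/CVL hunks above either clear a single entry in, or wholesale
 * reset, the driver's roundrobin FCF bitmap before rediscovery. A generic
 * sketch of such a bitmap; the table size and names are illustrative, not
 * lpfc's own:
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define FCF_TBL_SIZE	64			/* assumed FCF table size */

struct fcf_rr {
	DECLARE_BITMAP(bmask, FCF_TBL_SIZE);	/* eligible FCF indexes */
	unsigned int last;			/* index handed out last */
};

static void fcf_rr_set(struct fcf_rr *rr, unsigned int idx)
{
	set_bit(idx, rr->bmask);		/* record an eligible FCF */
}

static void fcf_rr_clear(struct fcf_rr *rr, unsigned int idx)
{
	clear_bit(idx, rr->bmask);		/* e.g. on an FCF DEAD event */
}

static void fcf_rr_reset(struct fcf_rr *rr)
{
	bitmap_zero(rr->bmask, FCF_TBL_SIZE);	/* start a fresh discovery */
}

/* next eligible index after the last one used, wrapping around once */
static int fcf_rr_next(struct fcf_rr *rr)
{
	unsigned int next;

	next = find_next_bit(rr->bmask, FCF_TBL_SIZE, rr->last + 1);
	if (next >= FCF_TBL_SIZE)
		next = find_first_bit(rr->bmask, FCF_TBL_SIZE);
	if (next >= FCF_TBL_SIZE)
		return -1;			/* no eligible FCF recorded */
	rr->last = next;
	return next;
}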
+ */ + if ((phba->sli_rev == LPFC_SLI_REV4) && + !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) + mb->un.varRegVpi.upd = 1; mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; mb->un.varRegVpi.sid = vport->fc_myDID; mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c818a725596..2e51aa6b45b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1325,7 +1325,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); pde5->reftag = reftag; - /* Endian convertion if necessary for PDE5 */ + /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); pde5->reftag = cpu_to_le32(pde5->reftag); @@ -1347,7 +1347,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_ai, pde6, 1); bf_set(pde6_apptagval, pde6, apptagval); - /* Endian convertion if necessary for PDE6 */ + /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); pde6->word1 = cpu_to_le32(pde6->word1); pde6->word2 = cpu_to_le32(pde6->word2); @@ -1459,7 +1459,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); pde5->reftag = reftag; - /* Endian convertion if necessary for PDE5 */ + /* Endianness conversion if necessary for PDE5 */ pde5->word0 = cpu_to_le32(pde5->word0); pde5->reftag = cpu_to_le32(pde5->reftag); @@ -1479,7 +1479,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_ai, pde6, 1); bf_set(pde6_apptagval, pde6, apptagval); - /* Endian convertion if necessary for PDE6 */ + /* Endianness conversion if necessary for PDE6 */ pde6->word0 = cpu_to_le32(pde6->word0); pde6->word1 = cpu_to_le32(pde6->word1); pde6->word2 = cpu_to_le32(pde6->word2); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e758eae0d0f..fb8905f893f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1046,7 +1046,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } else spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_ERR,LOG_SLI, + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0318 Failed to allocate IOTAG.last IOTAG is %d\n", psli->last_iotag); @@ -3914,7 +3914,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED | LPFC_SLI3_CRP_ENABLED | - LPFC_SLI3_BG_ENABLED); + LPFC_SLI3_BG_ENABLED | + LPFC_SLI3_DSS_ENABLED); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0442 Adapter failed to init, mbxCmd x%x " @@ -3949,8 +3950,23 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) } else phba->max_vpi = 0; - if (pmb->u.mb.un.varCfgPort.gdss) + phba->fips_level = 0; + phba->fips_spec_rev = 0; + if (pmb->u.mb.un.varCfgPort.gdss) { phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; + phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; + phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2850 Security Crypto Active. 
FIPS x%d " + "(Spec Rev: x%d)", + phba->fips_level, phba->fips_spec_rev); + } + if (pmb->u.mb.un.varCfgPort.sec_err) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2856 Config Port Security Crypto " + "Error: x%x ", + pmb->u.mb.un.varCfgPort.sec_err); + } if (pmb->u.mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; if (pmb->u.mb.un.varCfgPort.gcrp) @@ -9040,6 +9056,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, switch (bf_get(lpfc_cqe_code, &cqevt)) { case CQE_CODE_COMPL_WQE: /* Process the WQ/RQ complete event */ + phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_els_wcqe(phba, (struct lpfc_wcqe_complete *)&cqevt); break; @@ -9050,11 +9067,13 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, break; case CQE_CODE_XRI_ABORTED: /* Process the WQ XRI abort event */ + phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&cqevt); break; case CQE_CODE_RECEIVE: /* Process the RQ event */ + phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_rcqe(phba, (struct lpfc_rcqe *)&cqevt); break; @@ -9276,7 +9295,6 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, { struct lpfc_wcqe_release wcqe; bool workposted = false; - unsigned long iflag; /* Copy the work queue CQE and convert endian order if needed */ lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); @@ -9285,9 +9303,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { case CQE_CODE_COMPL_WQE: /* Process the WQ complete event */ - spin_lock_irqsave(&phba->hbalock, iflag); phba->last_completion_time = jiffies; - spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_sli4_fp_handle_fcp_wcqe(phba, (struct lpfc_wcqe_complete *)&wcqe); break; @@ -9298,6 +9314,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, break; case CQE_CODE_XRI_ABORTED: /* Process the WQ XRI abort event */ + phba->last_completion_time = jiffies; workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, (struct sli4_wcqe_xri_aborted *)&wcqe); break; @@ -12278,12 +12295,9 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) spin_lock_irq(&phba->hbalock); phba->hba_flag |= FCF_DISC_INPROGRESS; spin_unlock_irq(&phba->hbalock); - /* Reset FCF round robin index bmask for new scan */ - if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) { - memset(phba->fcf.fcf_rr_bmask, 0, - sizeof(*phba->fcf.fcf_rr_bmask)); + /* Reset eligible FCF count for new scan */ + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) phba->fcf.eligible_fcf_cnt = 0; - } error = 0; } fail_fcf_scan: diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index d28830af71d..61afb3420a9 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.15" +#define LPFC_DRIVER_VERSION "8.3.16" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 58d1134935e..9793aa6afb1 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4199,8 +4199,10 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); - if (rc) + if (rc) { + kfree(fw_control_context); return rc; + } ccb = &pm8001_ha->ccb_info[tag]; ccb->ccb_tag = tag; ccb->fw_control_context = fw_control_context; @@ -4276,8 +4278,10 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); - if (rc) + if (rc) { + kfree(fw_control_context); return rc; + } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; ccb->ccb_tag = tag; @@ -4387,6 +4391,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, fw_control->len, 0) != 0) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Mem alloc failure\n")); + kfree(fw_control_context); return -ENOMEM; } } @@ -4401,8 +4406,10 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, fw_control_context->virtAddr = buffer; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); - if (rc) + if (rc) { + kfree(fw_control_context); return rc; + } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; ccb->ccb_tag = tag; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index a79da8dd206..9dc0a6616ed 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -36,6 +36,24 @@ #include "ql4_dbg.h" #include "ql4_nx.h" +#if defined(CONFIG_PCIEAER) +#include <linux/aer.h> +#else +/* AER releated */ +static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) +{ + return -EINVAL; +} +static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + return -EINVAL; +} +#endif + #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 #define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 #endif @@ -137,6 +155,9 @@ #define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */ #define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */ +#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */ + /* recovery timeout */ + #define LSDW(x) ((u32)((u64)(x))) #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) @@ -249,7 +270,6 @@ struct ddb_entry { uint32_t default_time2wait; /* Default Min time between * relogins (+aens) */ - atomic_t port_down_timer; /* Device connection timer */ atomic_t retry_relogin_timer; /* Min Time between relogins * (4000 only) */ atomic_t relogin_timer; /* Max Time to wait for relogin to complete */ @@ -378,7 +398,9 @@ struct scsi_qla_host { #define AF_MSI_ENABLED 16 /* 0x00010000 */ #define AF_MSIX_ENABLED 17 /* 0x00020000 */ #define AF_MBOX_COMMAND_NOPOLL 18 /* 0x00040000 */ - +#define AF_FW_RECOVERY 19 /* 0x00080000 */ +#define AF_EEH_BUSY 20 /* 0x00100000 */ +#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ unsigned long dpc_flags; @@ -474,7 +496,6 @@ struct scsi_qla_host { 
uint32_t timer_active; /* Recovery Timers */ - uint32_t port_down_retry_count; uint32_t discovery_wait; atomic_t check_relogin_timeouts; uint32_t retry_reset_ha_cnt; @@ -615,6 +636,15 @@ static inline int is_qla8022(struct scsi_qla_host *ha) return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; } +/* Note: Currently AER/EEH is now supported only for 8022 cards + * This function needs to be updated when AER/EEH is enabled + * for other cards. + */ +static inline int is_aer_supported(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; +} + static inline int adapter_up(struct scsi_qla_host *ha) { return (test_bit(AF_ONLINE, &ha->flags) != 0) && diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index c94c9ddfb3a..0336c6db8cb 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -673,17 +673,17 @@ struct flash_sys_info { }; /* 200 */ struct mbx_sys_info { - uint8_t board_id_str[16]; /* Keep board ID string first */ - /* in this structure for GUI. */ - uint16_t board_id; /* board ID code */ - uint16_t phys_port_cnt; /* number of physical network ports */ - uint16_t port_num; /* network port for this PCI function */ + uint8_t board_id_str[16]; /* 0-f Keep board ID string first */ + /* in this structure for GUI. */ + uint16_t board_id; /* 10-11 board ID code */ + uint16_t phys_port_cnt; /* 12-13 number of physical network ports */ + uint16_t port_num; /* 14-15 network port for this PCI function */ /* (port 0 is first port) */ - uint8_t mac_addr[6]; /* MAC address for this PCI function */ - uint32_t iscsi_pci_func_cnt; /* number of iSCSI PCI functions */ - uint32_t pci_func; /* this PCI function */ - unsigned char serial_number[16]; /* serial number string */ - uint8_t reserved[16]; + uint8_t mac_addr[6]; /* 16-1b MAC address for this PCI function */ + uint32_t iscsi_pci_func_cnt; /* 1c-1f number of iSCSI PCI functions */ + uint32_t pci_func; /* 20-23 this PCI function */ + unsigned char serial_number[16]; /* 24-33 serial number string */ + uint8_t reserved[12]; /* 34-3f */ }; struct crash_record { diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index c9cd5d6db98..95a26fb1626 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -93,6 +93,7 @@ void qla4xxx_free_irqs(struct scsi_qla_host *ha); void qla4xxx_process_response_queue(struct scsi_qla_host *ha); void qla4xxx_wake_dpc(struct scsi_qla_host *ha); void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha); +void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha); void qla4_8xxx_pci_config(struct scsi_qla_host *); int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); @@ -131,6 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha); int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); +void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); extern int ql4xextended_error_logging; extern int ql4xdiscoverywait; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 30073577c3a..4c9be77ee70 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -308,7 +308,6 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha) DEBUG2(printk("scsi%ld: %s: unable to get firmware " "state\n", ha->host_no, __func__)); break; - } if (ha->firmware_state & FW_STATE_ERROR) { @@ -445,6 +444,16 @@ static int 
qla4xxx_init_firmware(struct scsi_qla_host *ha) { int status = QLA_ERROR; + if (is_aer_supported(ha) && + test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) + return status; + + /* For 82xx, stop firmware before initializing because if BIOS + * has previously initialized firmware, then driver's initialize + * firmware will fail. */ + if (is_qla8022(ha)) + qla4_8xxx_stop_firmware(ha); + ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " @@ -669,7 +678,6 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, } ddb_entry->fw_ddb_index = fw_ddb_index; - atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->relogin_retry_count, 0); @@ -1556,8 +1564,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, /* Device is back online. */ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); - atomic_set(&ddb_entry->port_down_timer, - ha->port_down_retry_count); atomic_set(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->relogin_timer, 0); clear_bit(DF_RELOGIN, &ddb_entry->flags); diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index f89973deac5..4ef9ba112ee 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -19,7 +19,7 @@ qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) /* Calculate number of free request entries. */ if ((req_cnt + 2) >= ha->req_q_count) { - cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); + cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha); if (ha->request_in < cnt) ha->req_q_count = cnt - ha->request_in; else diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index aa65697a86b..2a1ab63f3eb 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -816,6 +816,9 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id) unsigned long flags = 0; uint8_t reqs_count = 0; + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + ha->isr_count++; status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); if (!(status & ha->nx_legacy_intr.int_vec_bit)) diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 940ee561ee0..90021704d8c 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -39,6 +39,22 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, "pointer\n", ha->host_no, __func__)); return status; } + + if (is_qla8022(ha) && + test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " + "completing mbx cmd as firmware recovery detected\n", + ha->host_no, __func__)); + return status; + } + + if ((is_aer_supported(ha)) && + (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " + "timeout MBX Exiting.\n", ha->host_no, __func__)); + return status; + } + /* Mailbox code active */ wait_count = MBOX_TOV * 100; @@ -150,6 +166,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) { if (time_after_eq(jiffies, wait_count)) break; + /* * Service the interrupt. 
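/*
 * The qla4xxx mailbox hunks above refuse to start, or prematurely complete,
 * mailbox commands once firmware recovery or a permanent PCI channel failure
 * has been flagged, instead of letting them time out. A condensed sketch of
 * that kind of pre-check; the flag names, structure and return codes here
 * are illustrative, not the driver's:
 */
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_AF_FW_RECOVERY		19	/* firmware reset in progress */
#define DEMO_AF_PCI_PERM_FAILURE	21	/* EEH declared the slot dead */

struct demo_hba {
	struct pci_dev *pdev;
	unsigned long flags;
};

static int demo_mbox_precheck(struct demo_hba *ha)
{
	if (test_bit(DEMO_AF_FW_RECOVERY, &ha->flags))
		return -EBUSY;		/* firmware would drop the command anyway */
	if (test_bit(DEMO_AF_PCI_PERM_FAILURE, &ha->flags) ||
	    pci_channel_offline(ha->pdev))
		return -ENODEV;		/* the command could only time out */
	return 0;
}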
* The ISR will save the mailbox status registers @@ -196,6 +213,14 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, /* Check for mailbox timeout. */ if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { + if (is_qla8022(ha) && + test_bit(AF_FW_RECOVERY, &ha->flags)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: prematurely completing mbx cmd as " + "firmware recovery detected\n", + ha->host_no, __func__)); + goto mbox_exit; + } DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...," " Scheduling Adapter Reset\n", ha->host_no, mbx_cmd[0])); @@ -246,6 +271,28 @@ mbox_exit: return status; } +void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) +{ + set_bit(AF_FW_RECOVERY, &ha->flags); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n", + ha->host_no, __func__); + + if (test_bit(AF_MBOX_COMMAND, &ha->flags)) { + if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) { + complete(&ha->mbx_intr_comp); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " + "recovery, doing premature completion of " + "mbx cmd\n", ha->host_no, __func__); + + } else { + set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw " + "recovery, doing premature completion of " + "polling mbx cmd\n", ha->host_no, __func__); + } + } +} + static uint8_t qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) @@ -361,7 +408,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ /* Save Command Line Paramater info */ - ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout); ha->discovery_wait = ql4xdiscoverywait; if (ha->acb_version == ACB_SUPPORTED) { diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 3e119ae7839..5d4a3822382 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha) return QLA_SUCCESS; } -static inline void +void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) { uint32_t drv_active; @@ -1441,11 +1441,15 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) static inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha) { - uint32_t drv_state; + uint32_t drv_state, drv_active; int rval; + drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); rval = drv_state & (1 << (ha->func_num * 4)); + if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active) + rval = 1; + return rval; } @@ -1949,7 +1953,8 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha) uint16_t cnt, chksum; uint16_t *wptr; struct qla_fdt_layout *fdt; - uint16_t mid, fid; + uint16_t mid = 0; + uint16_t fid = 0; struct ql82xx_hw_data *hw = &ha->hw; hw->flash_conf_off = FARX_ACCESS_FLASH_CONF; @@ -2105,6 +2110,9 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha) qla4_8xxx_clear_rst_ready(ha); qla4_8xxx_idc_unlock(ha); + if (rval == QLA_SUCCESS) + clear_bit(AF_FW_RECOVERY, &ha->flags); + return rval; } @@ -2145,7 +2153,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) goto exit_validate_mac82; } - if (mbox_sts[4] < sizeof(*sys_info)) { + /* Make sure we receive the minimum required data to cache internally */ + if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) { DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); goto exit_validate_mac82; diff --git 
a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 5529b2a3974..370d40ff152 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -163,10 +163,10 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { atomic_set(&ddb_entry->state, DDB_STATE_DEAD); - DEBUG2(printk("scsi%ld: %s: ddb [%d] port down retry count " + DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " "of (%d) secs exhausted, marking device DEAD.\n", ha->host_no, __func__, ddb_entry->fw_ddb_index, - ha->port_down_retry_count)); + QL4_SESS_RECOVERY_TMO)); qla4xxx_wake_dpc(ha); } @@ -298,7 +298,8 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) { int err; - ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count; + ddb_entry->sess->recovery_tmo = QL4_SESS_RECOVERY_TMO; + err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); if (err) { DEBUG2(printk(KERN_ERR "Could not add session.\n")); @@ -474,6 +475,14 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, struct srb *srb; int rval; + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) + cmd->result = DID_NO_CONNECT << 16; + else + cmd->result = DID_REQUEUE << 16; + goto qc_fail_command; + } + if (!sess) { cmd->result = DID_IMM_RETRY << 16; goto qc_fail_command; @@ -654,6 +663,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) uint32_t fw_heartbeat_counter, halt_status; fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " + "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", + ha->host_no, __func__)); + return; + } if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { ha->seconds_since_last_heartbeat++; @@ -662,6 +678,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) ha->seconds_since_last_heartbeat = 0; halt_status = qla4_8xxx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); + /* Since we cannot change dev_state in interrupt * context, set appropriate DPC flag then wakeup * DPC */ @@ -673,6 +690,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) set_bit(DPC_RESET_HA, &ha->dpc_flags); } qla4xxx_wake_dpc(ha); + qla4xxx_mailbox_premature_completion(ha); } } ha->fw_heartbeat_counter = fw_heartbeat_counter; @@ -698,6 +716,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) ha->host_no, __func__); set_bit(DPC_RESET_HA, &ha->dpc_flags); qla4xxx_wake_dpc(ha); + qla4xxx_mailbox_premature_completion(ha); } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { printk("scsi%ld: %s: HW State: NEED QUIES!\n", @@ -719,6 +738,19 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) { struct ddb_entry *ddb_entry, *dtemp; int start_dpc = 0; + uint16_t w; + + /* If we are in the middle of AER/EEH processing + * skip any processing and reschedule the timer + */ + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + mod_timer(&ha->timer, jiffies + HZ); + return; + } + + /* Hardware read to trigger an EEH error during mailbox waits. */ + if (!pci_channel_offline(ha->pdev)) + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); if (test_bit(AF_HBA_GOING_AWAY, &ha->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s exited. 
HBA GOING AWAY\n", @@ -1207,7 +1239,13 @@ static void qla4xxx_do_dpc(struct work_struct *work) /* Initialization not yet finished. Don't do anything yet. */ if (!test_bit(AF_INIT_DONE, &ha->flags)) - return; + goto do_dpc_exit; + + if (test_bit(AF_EEH_BUSY, &ha->flags)) { + DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n", + ha->host_no, __func__, ha->flags)); + goto do_dpc_exit; + } /* HBA is in the process of being permanently disabled. * Don't process anything */ @@ -1346,6 +1384,8 @@ dpc_post_reset_ha: } } } + +do_dpc_exit: clear_bit(AF_DPC_SCHEDULED, &ha->flags); } @@ -1612,6 +1652,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, ha->host = host; ha->host_no = host->host_no; + pci_enable_pcie_error_reporting(pdev); + /* Setup Runtime configurable options */ if (is_qla8022(ha)) { ha->isp_ops = &qla4_8xxx_isp_ops; @@ -1630,6 +1672,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, ha->isp_ops = &qla4xxx_isp_ops; } + /* Set EEH reset type to fundamental if required by hba */ + if (is_qla8022(ha)) + pdev->needs_freset = 1; + /* Configure PCI I/O space. */ ret = ha->isp_ops->iospace_config(ha); if (ret) @@ -1726,6 +1772,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, } } + pci_save_state(ha->pdev); ha->isp_ops->enable_intrs(ha); /* Start timer thread. */ @@ -1752,6 +1799,7 @@ probe_failed: qla4xxx_free_adapter(ha); probe_failed_ioconfig: + pci_disable_pcie_error_reporting(pdev); scsi_host_put(ha->host); probe_disable_device: @@ -1781,6 +1829,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) scsi_host_put(ha->host); + pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } @@ -1877,6 +1926,17 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha, int done = 0; struct srb *rp; uint32_t max_wait_time = EH_WAIT_CMD_TOV; + int ret = SUCCESS; + + /* Dont wait on command if PCI error is being handled + * by PCI AER driver + */ + if (unlikely(pci_channel_offline(ha->pdev)) || + (test_bit(AF_EEH_BUSY, &ha->flags))) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n", + ha->host_no, __func__); + return ret; + } do { /* Checking to see if its returned to OS */ @@ -2172,6 +2232,252 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) return return_status; } +/* PCI AER driver recovers from all correctable errors w/o + * driver intervention. For uncorrectable errors PCI AER + * driver calls the following device driver's callbacks + * + * - Fatal Errors - link_reset + * - Non-Fatal Errors - driver's pci_error_detected() which + * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 
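/*
 * One detail worth noting from the qla4xxx_timer() hunk above: a periodic
 * config-space read gives the platform error-recovery core (EEH/AER) a
 * chance to notice a frozen slot even while the driver is only polling
 * MMIO. A minimal version of that probe, with illustrative naming:
 */
#include <linux/pci.h>

static void demo_poke_for_eeh(struct pci_dev *pdev)
{
	u16 vendor;

	/* harmless read; on a frozen slot it lets the platform flag the error */
	if (!pci_channel_offline(pdev))
		pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
}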
+ * + * PCI AER driver calls + * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled + * returns RECOVERED or NEED_RESET if fw_hung + * NEED_RESET - driver's slot_reset() + * DISCONNECT - device is dead & cannot recover + * RECOVERED - driver's pci_resume() + */ +static pci_ers_result_t +qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n", + ha->host_no, __func__, state); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + switch (state) { + case pci_channel_io_normal: + clear_bit(AF_EEH_BUSY, &ha->flags); + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + set_bit(AF_EEH_BUSY, &ha->flags); + qla4xxx_mailbox_premature_completion(ha); + qla4xxx_free_irqs(ha); + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + set_bit(AF_EEH_BUSY, &ha->flags); + set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags); + qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * qla4xxx_pci_mmio_enabled() gets called if + * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER + * and read/write to the device still works. + **/ +static pci_ers_result_t +qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + if (test_bit(AF_FW_RECOVERY, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- " + "mmio_enabled\n", ha->host_no, __func__); + return PCI_ERS_RESULT_NEED_RESET; + } else + return PCI_ERS_RESULT_RECOVERED; +} + +uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) +{ + uint32_t rval = QLA_ERROR; + int fn; + struct pci_dev *other_pdev = NULL; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__); + + set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + + if (test_bit(AF_ONLINE, &ha->flags)) { + clear_bit(AF_ONLINE, &ha->flags); + qla4xxx_mark_all_devices_missing(ha); + qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); + } + + fn = PCI_FUNC(ha->pdev->devfn); + while (fn > 0) { + fn--; + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at " + "func %x\n", ha->host_no, __func__, fn); + /* Get the pci device given the domain, bus, + * slot/function number */ + other_pdev = + pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + if (!other_pdev) + continue; + + if (atomic_read(&other_pdev->enable_cnt)) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI " + "func in enabled state%x\n", ha->host_no, + __func__, fn); + pci_dev_put(other_pdev); + break; + } + pci_dev_put(other_pdev); + } + + /* The first function on the card, the reset owner will + * start & initialize the firmware. 
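/*
 * The loop above walks the lower-numbered PCI functions of the same device
 * to decide which function should own the firmware reset after an EEH/AER
 * event. A simplified illustration of one such policy -- "the lowest
 * enabled function owns the reset" -- which is a sketch, not necessarily
 * the driver's exact rule:
 */
#include <linux/pci.h>

static bool demo_is_reset_owner(struct pci_dev *pdev)
{
	int fn = PCI_FUNC(pdev->devfn);

	while (fn > 0) {
		struct pci_dev *other;

		fn--;
		other = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_DEVFN(PCI_SLOT(pdev->devfn), fn));
		if (!other)
			continue;
		if (atomic_read(&other->enable_cnt)) {
			pci_dev_put(other);	/* an active lower function exists */
			return false;
		}
		pci_dev_put(other);
	}
	return true;				/* nothing below us is enabled */
}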
The other functions + * on the card will reset the firmware context + */ + if (!fn) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset " + "0x%x is the owner\n", ha->host_no, __func__, + ha->pdev->devfn); + + qla4_8xxx_idc_lock(ha); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_COLD); + + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, + QLA82XX_IDC_VERSION); + + qla4_8xxx_idc_unlock(ha); + clear_bit(AF_FW_RECOVERY, &ha->flags); + rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST); + qla4_8xxx_idc_lock(ha); + + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " + "FAILED\n", ha->host_no, __func__); + qla4_8xxx_clear_drv_active(ha); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_FAILED); + } else { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " + "READY\n", ha->host_no, __func__); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_READY); + /* Clear driver state register */ + qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); + qla4_8xxx_set_drv_active(ha); + ha->isp_ops->enable_intrs(ha); + } + qla4_8xxx_idc_unlock(ha); + } else { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " + "the reset owner\n", ha->host_no, __func__, + ha->pdev->devfn); + if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == + QLA82XX_DEV_READY)) { + clear_bit(AF_FW_RECOVERY, &ha->flags); + rval = qla4xxx_initialize_adapter(ha, + PRESERVE_DDB_LIST); + if (rval == QLA_SUCCESS) + ha->isp_ops->enable_intrs(ha); + qla4_8xxx_idc_lock(ha); + qla4_8xxx_set_drv_active(ha); + qla4_8xxx_idc_unlock(ha); + } + } + clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + return rval; +} + +static pci_ers_result_t +qla4xxx_pci_slot_reset(struct pci_dev *pdev) +{ + pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + int rc; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n", + ha->host_no, __func__); + + if (!is_aer_supported(ha)) + return PCI_ERS_RESULT_NONE; + + /* Restore the saved state of PCIe device - + * BAR registers, PCI Config space, PCIX, MSI, + * IOV states + */ + pci_restore_state(pdev); + + /* pci_restore_state() clears the saved_state flag of the device + * save restored state which resets saved_state flag + */ + pci_save_state(pdev); + + /* Initialize device or resume if in suspended state */ + rc = pci_enable_device(pdev); + if (rc) { + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cant re-enable " + "device after reset\n", ha->host_no, __func__); + goto exit_slot_reset; + } + + ret = qla4xxx_request_irqs(ha); + if (ret) { + ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d" + " already in use.\n", pdev->irq); + goto exit_slot_reset; + } + + if (is_qla8022(ha)) { + if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { + ret = PCI_ERS_RESULT_RECOVERED; + goto exit_slot_reset; + } else + goto exit_slot_reset; + } + +exit_slot_reset: + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n" + "device after reset\n", ha->host_no, __func__, ret); + return ret; +} + +static void +qla4xxx_pci_resume(struct pci_dev *pdev) +{ + struct scsi_qla_host *ha = pci_get_drvdata(pdev); + int ret; + + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n", + ha->host_no, __func__); + + ret = qla4xxx_wait_for_hba_online(ha); + if (ret != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to " + "resume I/O from slot/link_reset\n", ha->host_no, + __func__); + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + clear_bit(AF_EEH_BUSY, &ha->flags); +} + +static 
struct pci_error_handlers qla4xxx_err_handler = { + .error_detected = qla4xxx_pci_error_detected, + .mmio_enabled = qla4xxx_pci_mmio_enabled, + .slot_reset = qla4xxx_pci_slot_reset, + .resume = qla4xxx_pci_resume, +}; + static struct pci_device_id qla4xxx_pci_tbl[] = { { .vendor = PCI_VENDOR_ID_QLOGIC, @@ -2206,6 +2512,7 @@ static struct pci_driver qla4xxx_pci_driver = { .id_table = qla4xxx_pci_tbl, .probe = qla4xxx_probe_adapter, .remove = qla4xxx_remove_adapter, + .err_handler = &qla4xxx_err_handler, }; static int __init qla4xxx_module_init(void) diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index c905dbd7533..a77b973f2cb 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k2" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k3" diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index bbbc186dbc1..1de30eb83bb 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -473,14 +473,17 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) */ return SUCCESS; case RESERVATION_CONFLICT: - /* - * let issuer deal with this, it could be just fine - */ - return SUCCESS; + if (scmd->cmnd[0] == TEST_UNIT_READY) + /* it is a success, we probed the device and + * found it */ + return SUCCESS; + /* otherwise, we failed to send the command */ + return FAILED; case QUEUE_FULL: scsi_handle_queue_full(scmd->device); /* fall through */ case BUSY: + return NEEDS_RETRY; default: return FAILED; } diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 66241dd525a..c399be97992 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c @@ -185,6 +185,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work) dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction, rq_data_dir(cmd->request)); scsi_unmap_user_pages(tcmd); + tcmd->rq->bio = NULL; scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); } diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index 7356a56ac45..be0ebce36e5 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c @@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info, tmp.close_delay = info->close_delay; tmp.closing_wait = info->closing_wait; tmp.custom_divisor = info->custom_divisor; - copy_to_user(retinfo,&tmp,sizeof(*retinfo)); + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) + return -EFAULT; + return 0; } @@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info, if (!new_info) return -EFAULT; - copy_from_user(&new_serial,new_info,sizeof(new_serial)); + if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) + return -EFAULT; old_info = *info; if (!capable(CAP_SYS_ADMIN)) { @@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value) status = 0; #endif local_irq_restore(flags); - put_user(status,value); - return 0; + return put_user(status, value); } /* @@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, send_break(info, arg ? 
arg*(100) : 250); return 0; case TIOCGSERIAL: - if (access_ok(VERIFY_WRITE, (void *) arg, - sizeof(struct serial_struct))) - return get_serial_info(info, - (struct serial_struct *) arg); - return -EFAULT; + return get_serial_info(info, + (struct serial_struct *) arg); case TIOCSSERIAL: return set_serial_info(info, (struct serial_struct *) arg); case TIOCSERGETLSR: /* Get line status register */ - if (access_ok(VERIFY_WRITE, (void *) arg, - sizeof(unsigned int))) - return get_lsr_info(info, (unsigned int *) arg); - return -EFAULT; + return get_lsr_info(info, (unsigned int *) arg); case TIOCSERGSTRUCT: - if (!access_ok(VERIFY_WRITE, (void *) arg, - sizeof(struct m68k_serial))) + if (copy_to_user((struct m68k_serial *) arg, + info, sizeof(struct m68k_serial))) return -EFAULT; - copy_to_user((struct m68k_serial *) arg, - info, sizeof(struct m68k_serial)); return 0; - default: return -ENOIOCTLCMD; } diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c index b745792ec25..eaafb98debe 100644 --- a/drivers/serial/8250_early.c +++ b/drivers/serial/8250_early.c @@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device, if (mmio || mmio32) printk(KERN_INFO - "Early serial console at MMIO%s 0x%llu (options '%s')\n", + "Early serial console at MMIO%s 0x%llx (options '%s')\n", mmio32 ? "32" : "", (unsigned long long)port->mapbase, device->options); else printk(KERN_INFO - "Early serial console at I/O port 0x%lu (options '%s')\n", + "Early serial console at I/O port 0x%lx (options '%s')\n", port->iobase, device->options); diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index e57fb3d228e..5318dd3774a 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c @@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate) unsigned int sclk = get_sclk(); /* Set TCR1 and TCR2, TFSR is not enabled for uart */ - SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); + SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); SPORT_PUT_TCR2(up, size + 1); pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 659a695bdad..2af8fd11312 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -14,11 +14,10 @@ #include <linux/slab.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/nwpserial.h> -#include <asm/prom.h> - struct of_serial_info { int type; int line; diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c index 7e5e5efea4e..cff9a306660 100644 --- a/drivers/serial/sn_console.c +++ b/drivers/serial/sn_console.c @@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags) sysrq_requested = 0; if (ch && time_before(jiffies, sysrq_timeout)) { spin_unlock_irqrestore(&port->sc_port.lock, flags); - handle_sysrq(ch, NULL); + handle_sysrq(ch); spin_lock_irqsave(&port->sc_port.lock, flags); /* ignore actual sysrq command char */ continue; diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c index 544f2e25d0e..6381a0282ee 100644 --- a/drivers/serial/suncore.c +++ b/drivers/serial/suncore.c @@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors); int sunserial_console_match(struct console *con, struct device_node *dp, struct uart_driver *drv, int line, bool ignore_line) { - if (!con || of_console_device != dp) + 
if (!con) + return 0; + + drv->cons = con; + + if (of_console_device != dp) return 0; if (!ignore_line) { @@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp, return 0; } - con->index = line; - drv->cons = con; - - if (!console_set_on_cmdline) + if (!console_set_on_cmdline) { + con->index = line; add_preferred_console(con->name, line, NULL); - + } return 1; } EXPORT_SYMBOL(sunserial_console_match); diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index f0a1418ce66..acd35d1ebd1 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -1723,7 +1723,7 @@ static void pl022_cleanup(struct spi_device *spi) } -static int __init +static int __devinit pl022_probe(struct amba_device *adev, struct amba_id *id) { struct device *dev = &adev->dev; @@ -1838,7 +1838,7 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) return status; } -static int __exit +static int __devexit pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); @@ -1970,7 +1970,7 @@ static struct amba_driver pl022_driver = { }, .id_table = pl022_ids, .probe = pl022_probe, - .remove = __exit_p(pl022_remove), + .remove = __devexit_p(pl022_remove), .suspend = pl022_suspend, .resume = pl022_resume, }; diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c index 59be3efe063..052b3c7fa6a 100644 --- a/drivers/spi/coldfire_qspi.c +++ b/drivers/spi/coldfire_qspi.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/platform_device.h> +#include <linux/sched.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/io.h> diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c index cddbfceb324..77d9e7ee8b2 100644 --- a/drivers/spi/mpc512x_psc_spi.c +++ b/drivers/spi/mpc512x_psc_spi.c @@ -406,9 +406,9 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) } /* bus_num is used only for the case dev->platform_data == NULL */ -static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, - u32 size, unsigned int irq, - s16 bus_num) +static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, + s16 bus_num) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct mpc512x_psc_spi *mps; @@ -492,7 +492,7 @@ free_master: return ret; } -static int __exit mpc512x_psc_spi_do_remove(struct device *dev) +static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); @@ -507,8 +507,8 @@ static int __exit mpc512x_psc_spi_do_remove(struct device *dev) return 0; } -static int __init mpc512x_psc_spi_of_probe(struct platform_device *op, - const struct of_device_id *match) +static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op, + const struct of_device_id *match) { const u32 *regaddr_p; u64 regaddr64, size64; @@ -539,7 +539,7 @@ static int __init mpc512x_psc_spi_of_probe(struct platform_device *op, irq_of_parse_and_map(op->dev.of_node, 0), id); } -static int __exit mpc512x_psc_spi_of_remove(struct platform_device *op) +static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op) { return mpc512x_psc_spi_do_remove(&op->dev); } @@ -553,7 +553,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); static struct of_platform_driver mpc512x_psc_spi_of_driver = { .probe = mpc512x_psc_spi_of_probe, - .remove = __exit_p(mpc512x_psc_spi_of_remove), + 
.remove = __devexit_p(mpc512x_psc_spi_of_remove), .driver = { .name = "mpc512x-psc-spi", .owner = THIS_MODULE, diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c index 24668b30a52..9bd1c92ad96 100644 --- a/drivers/spi/omap_spi_100k.c +++ b/drivers/spi/omap_spi_100k.c @@ -141,7 +141,12 @@ static void spi100k_write_data(struct spi_master *master, int len, int data) { struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - /* write 16-bit word */ + /* write 16-bit word, shifting 8-bit data if necessary */ + if (len <= 8) { + data <<= 8; + len = 16; + } + spi100k_enable_clock(master); writew( data , spi100k->base + SPI_TX_MSB); @@ -162,6 +167,10 @@ static int spi100k_read_data(struct spi_master *master, int len) int dataH,dataL; struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + /* Always do at least 16 bits */ + if (len <= 8) + len = 16; + spi100k_enable_clock(master); writew(SPI_CTRL_SEN(0) | SPI_CTRL_WORD_SIZE(len) | @@ -214,10 +223,6 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) c = count; word_len = cs->word_len; - /* RX_ONLY mode needs dummy data in TX reg */ - if (xfer->tx_buf == NULL) - spi100k_write_data(spi->master,word_len, 0); - if (word_len <= 8) { u8 *rx; const u8 *tx; @@ -227,9 +232,9 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) do { c-=1; if (xfer->tx_buf != NULL) - spi100k_write_data(spi->master,word_len, *tx); + spi100k_write_data(spi->master, word_len, *tx++); if (xfer->rx_buf != NULL) - *rx = spi100k_read_data(spi->master,word_len); + *rx++ = spi100k_read_data(spi->master, word_len); } while(c); } else if (word_len <= 16) { u16 *rx; @@ -380,10 +385,6 @@ static void omap1_spi100k_work(struct work_struct *work) if (t->len) { unsigned count; - /* RX_ONLY mode needs dummy data in TX reg */ - if (t->tx_buf == NULL) - spi100k_write_data(spi->master, 8, 0); - count = omap1_spi100k_txrx_pio(spi, t); m->actual_length += count; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 1bb1b88780c..a9e5c79ae52 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -528,6 +528,10 @@ int spi_register_master(struct spi_master *master) dynamic = 1; } + spin_lock_init(&master->bus_lock_spinlock); + mutex_init(&master->bus_lock_mutex); + master->bus_lock_flag = 0; + /* register the device, then userspace will see it. * registration fails if the bus ID is in use. */ @@ -670,6 +674,35 @@ int spi_setup(struct spi_device *spi) } EXPORT_SYMBOL_GPL(spi_setup); +static int __spi_async(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + + /* Half-duplex links include original MicroWire, and ones with + * only one data pin like SPI_3WIRE (switches direction) or where + * either MOSI or MISO is missing. They can also be caused by + * software limitations. 
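/*
 * With the validation in __spi_async() above, a half-duplex link
 * (SPI_MASTER_HALF_DUPLEX or SPI_3WIRE) rejects any transfer that carries
 * both tx_buf and rx_buf. A command/response exchange therefore has to be
 * split into a write-only and a read-only transfer, for example
 * (illustrative helper; the caller supplies DMA-safe buffers):
 */
#include <linux/spi/spi.h>

static int demo_halfduplex_cmd(struct spi_device *spi,
			       const u8 *cmd, size_t cmd_len,
			       u8 *resp, size_t resp_len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = cmd,  .len = cmd_len },	/* write-only leg */
		{ .rx_buf = resp, .len = resp_len },	/* read-only leg */
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}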
+ */ + if ((master->flags & SPI_MASTER_HALF_DUPLEX) + || (spi->mode & SPI_3WIRE)) { + struct spi_transfer *xfer; + unsigned flags = master->flags; + + list_for_each_entry(xfer, &message->transfers, transfer_list) { + if (xfer->rx_buf && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) + return -EINVAL; + if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) + return -EINVAL; + } + } + + message->spi = spi; + message->status = -EINPROGRESS; + return master->transfer(spi, message); +} + /** * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged @@ -702,33 +735,68 @@ EXPORT_SYMBOL_GPL(spi_setup); int spi_async(struct spi_device *spi, struct spi_message *message) { struct spi_master *master = spi->master; + int ret; + unsigned long flags; - /* Half-duplex links include original MicroWire, and ones with - * only one data pin like SPI_3WIRE (switches direction) or where - * either MOSI or MISO is missing. They can also be caused by - * software limitations. - */ - if ((master->flags & SPI_MASTER_HALF_DUPLEX) - || (spi->mode & SPI_3WIRE)) { - struct spi_transfer *xfer; - unsigned flags = master->flags; + spin_lock_irqsave(&master->bus_lock_spinlock, flags); - list_for_each_entry(xfer, &message->transfers, transfer_list) { - if (xfer->rx_buf && xfer->tx_buf) - return -EINVAL; - if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) - return -EINVAL; - if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) - return -EINVAL; - } - } + if (master->bus_lock_flag) + ret = -EBUSY; + else + ret = __spi_async(spi, message); - message->spi = spi; - message->status = -EINPROGRESS; - return master->transfer(spi, message); + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; } EXPORT_SYMBOL_GPL(spi_async); +/** + * spi_async_locked - version of spi_async with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers, including completion callback + * Context: any (irqs may be blocked, etc) + * + * This call may be used in_irq and other contexts which can't sleep, + * as well as from task contexts which can sleep. + * + * The completion callback is invoked in a context which can't sleep. + * Before that invocation, the value of message->status is undefined. + * When the callback is issued, message->status holds either zero (to + * indicate complete success) or a negative error code. After that + * callback returns, the driver which issued the transfer request may + * deallocate the associated memory; it's no longer in use by any SPI + * core or controller driver code. + * + * Note that although all messages to a spi_device are handled in + * FIFO order, messages may go to different devices in other orders. + * Some device might be higher priority, or have various "hard" access + * time requirements, for example. + * + * On detection of any fault during the transfer, processing of + * the entire message is aborted, and the device is deselected. + * Until returning from the associated message completion callback, + * no other spi_message queued to that device will be processed. + * (This rule applies equally to all the synchronous transfer calls, + * which are wrappers around this core asynchronous primitive.) 
+ */ +int spi_async_locked(struct spi_device *spi, struct spi_message *message) +{ + struct spi_master *master = spi->master; + int ret; + unsigned long flags; + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + + ret = __spi_async(spi, message); + + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + return ret; + +} +EXPORT_SYMBOL_GPL(spi_async_locked); + /*-------------------------------------------------------------------------*/ @@ -742,6 +810,32 @@ static void spi_complete(void *arg) complete(arg); } +static int __spi_sync(struct spi_device *spi, struct spi_message *message, + int bus_locked) +{ + DECLARE_COMPLETION_ONSTACK(done); + int status; + struct spi_master *master = spi->master; + + message->complete = spi_complete; + message->context = &done; + + if (!bus_locked) + mutex_lock(&master->bus_lock_mutex); + + status = spi_async_locked(spi, message); + + if (!bus_locked) + mutex_unlock(&master->bus_lock_mutex); + + if (status == 0) { + wait_for_completion(&done); + status = message->status; + } + message->context = NULL; + return status; +} + /** * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged @@ -765,21 +859,86 @@ static void spi_complete(void *arg) */ int spi_sync(struct spi_device *spi, struct spi_message *message) { - DECLARE_COMPLETION_ONSTACK(done); - int status; - - message->complete = spi_complete; - message->context = &done; - status = spi_async(spi, message); - if (status == 0) { - wait_for_completion(&done); - status = message->status; - } - message->context = NULL; - return status; + return __spi_sync(spi, message, 0); } EXPORT_SYMBOL_GPL(spi_sync); +/** + * spi_sync_locked - version of spi_sync with exclusive bus usage + * @spi: device with which data will be exchanged + * @message: describes the data transfers + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. Low-overhead controller + * drivers may DMA directly into and out of the message buffers. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must + * be released by a spi_bus_unlock call when the exclusive access is over. + * + * It returns zero on success, else a negative error code. + */ +int spi_sync_locked(struct spi_device *spi, struct spi_message *message) +{ + return __spi_sync(spi, message, 1); +} +EXPORT_SYMBOL_GPL(spi_sync_locked); + +/** + * spi_bus_lock - obtain a lock for exclusive SPI bus usage + * @master: SPI bus master that should be locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call should be used by drivers that require exclusive access to the + * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the + * exclusive access is over. Data transfer must be done by spi_sync_locked + * and spi_async_locked calls when the SPI bus lock is held. + * + * It returns zero on success, else a negative error code. 
+ */ +int spi_bus_lock(struct spi_master *master) +{ + unsigned long flags; + + mutex_lock(&master->bus_lock_mutex); + + spin_lock_irqsave(&master->bus_lock_spinlock, flags); + master->bus_lock_flag = 1; + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); + + /* mutex remains locked until spi_bus_unlock is called */ + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_lock); + +/** + * spi_bus_unlock - release the lock for exclusive SPI bus usage + * @master: SPI bus master that was locked for exclusive bus access + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * This call releases an SPI bus lock previously obtained by an spi_bus_lock + * call. + * + * It returns zero on success, else a negative error code. + */ +int spi_bus_unlock(struct spi_master *master) +{ + master->bus_lock_flag = 0; + + mutex_unlock(&master->bus_lock_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bus_unlock); + /* portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 5265330a528..8b55724d5f3 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -259,7 +259,6 @@ static void bitbang_work(struct work_struct *work) struct spi_bitbang *bitbang = container_of(work, struct spi_bitbang, work); unsigned long flags; - int do_setup = -1; int (*setup_transfer)(struct spi_device *, struct spi_transfer *); @@ -275,6 +274,7 @@ static void bitbang_work(struct work_struct *work) unsigned tmp; unsigned cs_change; int status; + int do_setup = -1; m = container_of(bitbang->queue.next, struct spi_message, queue); @@ -307,6 +307,8 @@ static void bitbang_work(struct work_struct *work) status = setup_transfer(spi, t); if (status < 0) break; + if (do_setup == -1) + do_setup = 0; } /* set up default clock polarity, and activate chip; @@ -367,11 +369,6 @@ static void bitbang_work(struct work_struct *work) m->status = status; m->complete(m->context); - /* restore speed and wordsize if it was overridden */ - if (do_setup == 1) - setup_transfer(spi, NULL); - do_setup = 0; - /* normally deactivate chipselect ... unless no error and * cs_change has hinted that the next message will probably * be for this chip too. 
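/*
 * A short usage sketch for the spi_bus_lock()/spi_bus_unlock() API added
 * above: a client that must issue two messages back to back with no other
 * device's traffic in between locks the bus, uses the *_locked transfer
 * calls, and then releases it. The helper name and the two placeholder
 * messages are illustrative.
 */
#include <linux/spi/spi.h>

static int demo_atomic_sequence(struct spi_device *spi,
				struct spi_message *first,
				struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	/* unlocked spi_sync() callers now block; spi_async() callers get -EBUSY */
	ret = spi_bus_lock(master);
	if (ret)
		return ret;

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(master);		/* bus available to everyone again */
	return ret;
}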
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h index fc033bbf918..c16bf853c3e 100644 --- a/drivers/spi/spi_bitbang_txrx.h +++ b/drivers/spi/spi_bitbang_txrx.h @@ -44,7 +44,7 @@ static inline u32 bitbang_txrx_be_cpha0(struct spi_device *spi, - unsigned nsecs, unsigned cpol, + unsigned nsecs, unsigned cpol, unsigned flags, u32 word, u8 bits) { /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ @@ -53,7 +53,8 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, for (word <<= (32 - bits); likely(bits); bits--) { /* setup MSB (to slave) on trailing edge */ - setmosi(spi, word & (1 << 31)); + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); spidelay(nsecs); /* T(setup) */ setsck(spi, !cpol); @@ -61,7 +62,8 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, /* sample MSB (from slave) on leading edge */ word <<= 1; - word |= getmiso(spi); + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); setsck(spi, cpol); } return word; @@ -69,7 +71,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, static inline u32 bitbang_txrx_be_cpha1(struct spi_device *spi, - unsigned nsecs, unsigned cpol, + unsigned nsecs, unsigned cpol, unsigned flags, u32 word, u8 bits) { /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ @@ -79,7 +81,8 @@ bitbang_txrx_be_cpha1(struct spi_device *spi, /* setup MSB (to slave) on leading edge */ setsck(spi, !cpol); - setmosi(spi, word & (1 << 31)); + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); spidelay(nsecs); /* T(setup) */ setsck(spi, cpol); @@ -87,7 +90,8 @@ bitbang_txrx_be_cpha1(struct spi_device *spi, /* sample MSB (from slave) on trailing edge */ word <<= 1; - word |= getmiso(spi); + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); } return word; } diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index 8b528128111..0d4ceba3b59 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c @@ -156,7 +156,7 @@ butterfly_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } /*----------------------------------------------------------------------*/ diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index 7edbd5807e0..e24a63498ac 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -146,25 +146,63 @@ static inline int getmiso(const struct spi_device *spi) static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + +/* + * These functions do not call setmosi or getmiso if respective flag + * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to + 
* call when such pin is not present or defined in the controller. + * A separate set of callbacks is defined to get highest possible + * speed in the generic case (when both MISO and MOSI lines are + * available), as optimiser will remove the checks when argument is + * constant. + */ + +static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); } /*----------------------------------------------------------------------*/ @@ -232,19 +270,30 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) } static int __init -spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) +spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, + u16 *res_flags) { int value; /* NOTE: SPI_*_GPIO symbols may reference "pdata" */ - value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); - if (value) - goto done; + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) { + value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); + if (value) + goto done; + } else { + /* HW configuration without MOSI pin */ + *res_flags |= SPI_MASTER_NO_TX; + } - value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); - if (value) - goto free_mosi; + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) { + value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); + if (value) + goto free_mosi; + } else { + /* HW configuration without MISO pin */ + *res_flags |= SPI_MASTER_NO_RX; + } value = spi_gpio_alloc(SPI_SCK_GPIO, label, false); if (value) @@ -253,9 +302,11 @@ spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label) goto done; free_miso: - gpio_free(SPI_MISO_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); free_mosi: - gpio_free(SPI_MOSI_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); done: return value; } @@ -266,6 +317,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) struct spi_master *master; struct spi_gpio *spi_gpio; struct spi_gpio_platform_data *pdata; + u16 master_flags = 0; pdata = pdev->dev.platform_data; #ifdef GENERIC_BITBANG @@ -273,7 +325,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) return -ENODEV; #endif - status = spi_gpio_request(pdata, dev_name(&pdev->dev)); + status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); if (status < 0) return status; @@ -289,6 +341,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (pdata) spi_gpio->pdata = *pdata; + master->flags = master_flags; master->bus_num = pdev->id; master->num_chipselect = SPI_N_CHIPSEL; master->setup = spi_gpio_setup; @@ -296,10 +349,18 @@ static int __init spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.master = spi_master_get(master); spi_gpio->bitbang.chipselect = spi_gpio_chipselect; - 
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; - spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; - spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; - spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + } else { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; + } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; spi_gpio->bitbang.flags = SPI_CS_HIGH; @@ -307,8 +368,10 @@ static int __init spi_gpio_probe(struct platform_device *pdev) if (status < 0) { spi_master_put(spi_gpio->bitbang.master); gpio_free: - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); spi_master_put(master); } @@ -331,8 +394,10 @@ static int __exit spi_gpio_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - gpio_free(SPI_MISO_GPIO); - gpio_free(SPI_MOSI_GPIO); + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); gpio_free(SPI_SCK_GPIO); return status; diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c index 86fb7b5993d..7746a41ab6d 100644 --- a/drivers/spi/spi_lm70llp.c +++ b/drivers/spi/spi_lm70llp.c @@ -191,7 +191,7 @@ static void lm70_chipselect(struct spi_device *spi, int value) */ static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static void spi_lm70llp_attach(struct parport *p) diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c index 8979a75dbd7..be991359bf9 100644 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ b/drivers/spi/spi_s3c24xx_gpio.c @@ -64,25 +64,25 @@ static inline u32 getmiso(struct spi_device *dev) static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c index a511be7961a..5c643916119
100644 --- a/drivers/spi/spi_sh_sci.c +++ b/drivers/spi/spi_sh_sci.c @@ -83,25 +83,25 @@ static inline u32 getmiso(struct spi_device *dev) static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); } static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); } static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) { - return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits); + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); } static void sh_sci_spi_chipselect(struct spi_device *dev, int value) diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 4a7a7a7f11b..335311a98fd 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig" source "drivers/staging/memrar/Kconfig" -source "drivers/staging/sep/Kconfig" - source "drivers/staging/iio/Kconfig" source "drivers/staging/zram/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ca5c03eb3ce..e3f1e1b6095 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/ obj-$(CONFIG_HYPERV) += hv/ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ -obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_ZRAM) += zram/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c index b4a8d5eb64f..05ca15a6c9f 100644 --- a/drivers/staging/batman-adv/bat_sysfs.c +++ b/drivers/staging/batman-adv/bat_sysfs.c @@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr, if (atomic_read(&bat_priv->log_level) == log_level_tmp) return count; + bat_info(net_dev, "Changing log level from: %i to: %li\n", + atomic_read(&bat_priv->log_level), + log_level_tmp); + atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp); return count; } diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 92c216a5688..baa8b05b9e8 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if) static void update_mac_addresses(struct batman_if *batman_if) { + if (!batman_if || !batman_if->packet_buff) + return; + addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, @@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev, if (batman_if->if_status != IF_INACTIVE) return; - dev_hold(batman_if->net_dev); - update_mac_addresses(batman_if); batman_if->if_status = IF_TO_BE_ACTIVATED; @@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev, (batman_if->if_status != IF_TO_BE_ACTIVATED)) return; - dev_put(batman_if->net_dev); - batman_if->if_status = IF_INACTIVE; bat_info(net_dev, "Interface deactivated: %s\n", 
batman_if->dev); @@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) if (ret != 1) goto out; + dev_hold(net_dev); + batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); if (!batman_if) { pr_err("Can't add interface (%s): out of memory\n", net_dev->name); - goto out; + goto release_dev; } batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); @@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) batman_if->if_num = -1; batman_if->net_dev = net_dev; batman_if->if_status = IF_NOT_IN_USE; + batman_if->packet_buff = NULL; INIT_LIST_HEAD(&batman_if->list); check_known_mac_addr(batman_if->net_dev->dev_addr); @@ -346,6 +348,8 @@ free_dev: kfree(batman_if->dev); free_if: kfree(batman_if); +release_dev: + dev_put(net_dev); out: return NULL; } @@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if) batman_if->if_status = IF_TO_BE_REMOVED; list_del_rcu(&batman_if->list); sysfs_del_hardif(&batman_if->hardif_obj); + dev_put(batman_if->net_dev); call_rcu(&batman_if->rcu, hardif_free_interface); } @@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this, /* FIXME: each batman_if will be attached to a softif */ struct bat_priv *bat_priv = netdev_priv(soft_device); - if (!batman_if) - batman_if = hardif_add_interface(net_dev); + if (!batman_if && event == NETDEV_REGISTER) + batman_if = hardif_add_interface(net_dev); if (!batman_if) goto out; switch (event) { - case NETDEV_REGISTER: - break; case NETDEV_UP: hardif_activate_interface(soft_device, bat_priv, batman_if); break; @@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, struct bat_priv *bat_priv = netdev_priv(soft_device); struct batman_packet *batman_packet; struct batman_if *batman_if; - struct net_device_stats *stats; - struct rtnl_link_stats64 temp; int ret; skb = skb_share_check(skb, GFP_ATOMIC); @@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, if (batman_if->if_status != IF_ACTIVE) goto err_free; - stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp); - if (stats) { - stats->rx_packets++; - stats->rx_bytes += skb->len; - } - batman_packet = (struct batman_packet *)skb->data; if (batman_packet->version != COMPAT_VERSION) { diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c index fc3d32c1272..3ae7dd2d2d4 100644 --- a/drivers/staging/batman-adv/icmp_socket.c +++ b/drivers/staging/batman-adv/icmp_socket.c @@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file) INIT_LIST_HEAD(&socket_client->queue_list); socket_client->queue_len = 0; socket_client->index = i; + socket_client->bat_priv = inode->i_private; spin_lock_init(&socket_client->lock); init_waitqueue_head(&socket_client->queue_wait); @@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf, static ssize_t bat_socket_write(struct file *file, const char __user *buff, size_t len, loff_t *off) { - /* FIXME: each orig_node->batman_if will be attached to a softif */ - struct bat_priv *bat_priv = netdev_priv(soft_device); struct socket_client *socket_client = file->private_data; + struct bat_priv *bat_priv = socket_client->bat_priv; struct icmp_packet_rr icmp_packet; struct orig_node *orig_node; struct batman_if *batman_if; @@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, return -EINVAL; } + if (!bat_priv->primary_if) + return -EFAULT; + if 
(len >= sizeof(struct icmp_packet_rr)) packet_len = sizeof(struct icmp_packet_rr); @@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (batman_if->if_status != IF_ACTIVE) goto dst_unreach; - memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN); + memcpy(icmp_packet.orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); if (packet_len == sizeof(struct icmp_packet_rr)) memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); @@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv) goto err; d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, - bat_priv->debug_dir, NULL, &fops); + bat_priv->debug_dir, bat_priv, &fops); if (d) goto err; diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c index 2686019fe4e..ef7c20ae797 100644 --- a/drivers/staging/batman-adv/main.c +++ b/drivers/staging/batman-adv/main.c @@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size) int is_my_mac(uint8_t *addr) { struct batman_if *batman_if; + rcu_read_lock(); list_for_each_entry_rcu(batman_if, &if_list, list) { - if ((batman_if->net_dev) && - (compare_orig(batman_if->net_dev->dev_addr, addr))) { + if (batman_if->if_status != IF_ACTIVE) + continue; + + if (compare_orig(batman_if->net_dev->dev_addr, addr)) { rcu_read_unlock(); return 1; } diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c index 28bb627ffa1..de5a8c1a810 100644 --- a/drivers/staging/batman-adv/originator.c +++ b/drivers/staging/batman-adv/originator.c @@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) { struct orig_node *orig_node; + unsigned long flags; HASHIT(hashit); /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); while (hash_iterate(orig_hash, &hashit)) { orig_node = hashit.bucket->data; @@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) goto err; } - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; err: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return -ENOMEM; } @@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) { struct batman_if *batman_if_tmp; struct orig_node *orig_node; + unsigned long flags; HASHIT(hashit); int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); while (hash_iterate(orig_hash, &hashit)) { orig_node = hashit.bucket->data; @@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) rcu_read_unlock(); batman_if->if_num = -1; - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; err: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return -ENOMEM; } diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c index 066cc9149bf..032195e6de9 100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c @@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb, static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) { + /* FIXME: each batman_if will be attached to a softif */ + struct bat_priv *bat_priv = 
netdev_priv(soft_device); struct orig_node *orig_node; struct icmp_packet_rr *icmp_packet; struct ethhdr *ethhdr; @@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) return NET_RX_DROP; } + if (!bat_priv->primary_if) + return NET_RX_DROP; + /* answer echo request (ping) */ /* get routing information */ spin_lock_irqsave(&orig_hash_lock, flags); @@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) } memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); - memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); + memcpy(icmp_packet->orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); icmp_packet->msg_type = ECHO_REPLY; icmp_packet->ttl = TTL; @@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) { + /* FIXME: each batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); struct orig_node *orig_node; struct icmp_packet *icmp_packet; struct ethhdr *ethhdr; @@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) return NET_RX_DROP; } + if (!bat_priv->primary_if) + return NET_RX_DROP; + /* get routing information */ spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) @@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) } memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); - memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); + memcpy(icmp_packet->orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); icmp_packet->msg_type = TTL_EXCEEDED; icmp_packet->ttl = TTL; diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h index 21d0717afb0..9aa9d369c75 100644 --- a/drivers/staging/batman-adv/types.h +++ b/drivers/staging/batman-adv/types.h @@ -126,6 +126,7 @@ struct socket_client { unsigned char index; spinlock_t lock; wait_queue_head_t queue_wait; + struct bat_priv *bat_priv; }; struct socket_packet { diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c index c6aa52f8dce..48d9fb1227d 100644 --- a/drivers/staging/comedi/drivers/das08_cs.c +++ b/drivers/staging/comedi/drivers/das08_cs.c @@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev, p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(io->flags); - p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; p_dev->resource[0]->start = io->win[0].base; p_dev->resource[0]->end = io->win[0].len; if (io->nwin > 1) { diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 56e11575c97..64a01147eca 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = { .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, .ndo_set_multicast_list = netvsc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, }; static int netvsc_probe(struct device *device) diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c index 17bc7626f70..d78c569ac94 100644 --- a/drivers/staging/hv/ring_buffer.c +++ b/drivers/staging/hv/ring_buffer.c @@ -193,8 +193,7 @@ Description: static inline u64 GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) { - return 
((u64)RingInfo->RingBuffer->WriteIndex << 32) - || RingInfo->RingBuffer->ReadIndex; + return (u64)RingInfo->RingBuffer->WriteIndex << 32; } diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h index 0063bde9a4b..8505a1c5f9e 100644 --- a/drivers/staging/hv/storvsc_api.h +++ b/drivers/staging/hv/storvsc_api.h @@ -28,10 +28,10 @@ #include "vmbus_api.h" /* Defines */ -#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) +#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) -#define STORVSC_MAX_IO_REQUESTS 64 +#define STORVSC_MAX_IO_REQUESTS 128 /* * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 075b61bd492..62882a437aa 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c @@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ - if (j == 0) + if (bounce_addr == 0) bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); while (srclen) { @@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, destlen = orig_sgl[i].length; /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ - if (j == 0) + if (bounce_addr == 0) bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); while (destlen) { @@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, unsigned int request_size = 0; int i; struct scatterlist *sgl; + unsigned int sg_count = 0; DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, @@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, request->DataBuffer.Length = scsi_bufflen(scmnd); if (scsi_sg_count(scmnd)) { sgl = (struct scatterlist *)scsi_sglist(scmnd); + sg_count = scsi_sg_count(scmnd); /* check if we need to bounce the sgl */ if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { @@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, scsi_sg_count(scmnd)); sgl = cmd_request->bounce_sgl; + sg_count = cmd_request->bounce_sgl_count; } request->DataBuffer.Offset = sgl[0].offset; - for (i = 0; i < scsi_sg_count(scmnd); i++) { + for (i = 0; i < sg_count; i++) { DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", i, sgl[i].length, sgl[i].offset); request->DataBuffer.PfnArray[i] = - page_to_pfn(sg_page((&sgl[i]))); + page_to_pfn(sg_page((&sgl[i]))); } } else if (scsi_sglist(scmnd)) { /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 638ad6b3589..9493128e5fd 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -1,6 +1,6 @@ config OCTEON_ETHERNET tristate "Cavium Networks Octeon Ethernet support" - depends on CPU_CAVIUM_OCTEON + depends on CPU_CAVIUM_OCTEON && NETDEVICES select PHYLIB select MDIO_OCTEON help diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c index cdc4dd50d63..8ec83d2dffb 100644 --- a/drivers/staging/pohmelfs/path_entry.c +++ b/drivers/staging/pohmelfs/path_entry.c @@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le return -ENOENT; } - read_lock(&current->fs->lock); + spin_lock(&current->fs->lock); path.mnt = mntget(current->fs->root.mnt); -
read_unlock(&current->fs->lock); + spin_unlock(&current->fs->lock); path.dentry = d; @@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi) return -ENOENT; } - read_lock(&current->fs->lock); + spin_lock(&current->fs->lock); root = dget(current->fs->root.dentry); - read_unlock(&current->fs->lock); + spin_unlock(&current->fs->lock); spin_lock(&dcache_lock); diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index a0fe31de0a6..ebf9074a908 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c @@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ + {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */ {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ @@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x050d, 0x815c)}, {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ - {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ + {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */ + {USB_DEVICE(0x1690, 0x0740)}, /* Askey */ {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ {USB_DEVICE(0x7392, 0x7718)}, @@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ + {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */ #endif /* RT2870 // */ #ifdef RT3070 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */ {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ + {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */ + {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */ {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ + {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */ + {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */ {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ + {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */ + {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */ + {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */ {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ + {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/ {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc.
RT3070 */ {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ @@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ + {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */ + {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */ {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ + {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */ + {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */ {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ + {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */ + {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */ + {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/ + {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */ + {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */ + {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */ + {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */ + {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */ + {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */ + {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */ #endif /* RT3070 // */ - {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */ {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig deleted file mode 100644 index 0a9c39c7f2b..00000000000 --- a/drivers/staging/sep/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -config DX_SEP - tristate "Discretix SEP driver" -# depends on MRST - depends on RAR_REGISTER && PCI - default y - help - Discretix SEP driver - - If unsure say M. The compiled module will be - called sep_driver.ko diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile deleted file mode 100644 index 628d5f91941..00000000000 --- a/drivers/staging/sep/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_DX_SEP) := sep_driver.o - diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO deleted file mode 100644 index ff0e931dab6..00000000000 --- a/drivers/staging/sep/TODO +++ /dev/null @@ -1,8 +0,0 @@ -Todo's so far (from Alan Cox) -- Fix firmware loading -- Get firmware into firmware git tree -- Review and tidy each algorithm function -- Check whether it can be plugged into any of the kernel crypto API - interfaces -- Do something about the magic shared memory interface and replace it - with something saner (in Linux terms) diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h deleted file mode 100644 index 9200524bb64..00000000000 --- a/drivers/staging/sep/sep_dev.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef __SEP_DEV_H__ -#define __SEP_DEV_H__ - -/* - * - * sep_dev.h - Security Processor Device Structures - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * Copyright(c) 2009 Discretix. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * CONTACTS: - * - * Alan Cox alan@linux.intel.com - * - */ - -struct sep_device { - /* pointer to pci dev */ - struct pci_dev *pdev; - - unsigned long in_use; - - /* address of the shared memory allocated during init for SEP driver - (coherent alloc) */ - void *shared_addr; - /* the physical address of the shared area */ - dma_addr_t shared_bus; - - /* restricted access region (coherent alloc) */ - dma_addr_t rar_bus; - void *rar_addr; - /* firmware regions: cache is at rar_addr */ - unsigned long cache_size; - - /* follows the cache */ - dma_addr_t resident_bus; - unsigned long resident_size; - void *resident_addr; - - /* start address of the access to the SEP registers from driver */ - void __iomem *reg_addr; - /* transaction counter that coordinates the transactions between SEP and HOST */ - unsigned long send_ct; - /* counter for the messages from sep */ - unsigned long reply_ct; - /* counter for the number of bytes allocated in the pool for the current - transaction */ - unsigned long data_pool_bytes_allocated; - - /* array of pointers to the pages that represent input data for the synchronic - DMA action */ - struct page **in_page_array; - - /* array of pointers to the pages that represent out data for the synchronic - DMA action */ - struct page **out_page_array; - - /* number of pages in the sep_in_page_array */ - unsigned long in_num_pages; - - /* number of pages in the sep_out_page_array */ - unsigned long out_num_pages; - - /* global data for every flow */ - struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS]; - - /* pointer to the workqueue that handles the flow done interrupts */ - struct workqueue_struct *flow_wq; - -}; - -static struct sep_device *sep_dev; - -static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value) -{ - void __iomem *addr = dev->reg_addr + reg; - writel(value, addr); -} - -static inline u32 sep_read_reg(struct sep_device *dev, int reg) -{ - void __iomem *addr = dev->reg_addr + reg; - return readl(addr); -} - -/* wait for SRAM write complete(indirect write */ -static inline void sep_wait_sram_write(struct sep_device *dev) -{ - u32 reg_val; - do - reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR); - while (!(reg_val & 1)); -} - - -#endif diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c deleted file mode 100644 index ecbde3467b1..00000000000 --- a/drivers/staging/sep/sep_driver.c +++ /dev/null @@ -1,2742 +0,0 @@ -/* - * - * sep_driver.c - Security Processor Driver main group of functions - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * Copyright(c) 2009 Discretix. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * CONTACTS: - * - * Mark Allyn mark.a.allyn@intel.com - * - * CHANGES: - * - * 2009.06.26 Initial publish - * - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/cdev.h> -#include <linux/kdev_t.h> -#include <linux/mutex.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/poll.h> -#include <linux/wait.h> -#include <linux/pci.h> -#include <linux/firmware.h> -#include <linux/slab.h> -#include <asm/ioctl.h> -#include <linux/ioport.h> -#include <asm/io.h> -#include <linux/interrupt.h> -#include <linux/pagemap.h> -#include <asm/cacheflush.h> -#include "sep_driver_hw_defs.h" -#include "sep_driver_config.h" -#include "sep_driver_api.h" -#include "sep_dev.h" - -#if SEP_DRIVER_ARM_DEBUG_MODE - -#define CRYS_SEP_ROM_length 0x4000 -#define CRYS_SEP_ROM_start_address 0x8000C000UL -#define CRYS_SEP_ROM_start_address_offset 0xC000UL -#define SEP_ROM_BANK_register 0x80008420UL -#define SEP_ROM_BANK_register_offset 0x8420UL -#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000 - -/* - * THESE 2 definitions are specific to the board - must be - * defined during integration - */ -#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000 - -/* 2M size */ - -static void sep_load_rom_code(struct sep_device *sep) -{ - /* Index variables */ - unsigned long i, k, j; - u32 reg; - u32 error; - u32 warning; - - /* Loading ROM from SEP_ROM_image.h file */ - k = sizeof(CRYS_SEP_ROM); - - edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n"); - - edbg("SEP Driver: k is %lu\n", k); - edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr); - edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset); - - for (i = 0; i < 4; i++) { - /* write bank */ - sep_write_reg(sep, SEP_ROM_BANK_register_offset, i); - - for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) { - sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]); - - k = k - 4; - - if (k == 0) { - j = CRYS_SEP_ROM_length; - i = 4; - } - } - } - - /* reset the SEP */ - sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1); - - /* poll for SEP ROM boot finish */ - do - reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); - while (!reg); - - edbg("SEP Driver: ROM polling ended\n"); - - switch (reg) { - case 0x1: - /* fatal error - read erro status from GPRO */ - error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); - edbg("SEP Driver: ROM polling case 1\n"); - break; - case 0x4: - /* Cold boot ended successfully */ - case 0x8: - /* Warmboot ended successfully */ - case 0x10: - /* ColdWarm boot ended successfully */ - error = 0; - case 0x2: - /* Boot First Phase ended */ - warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); - case 0x20: - edbg("SEP Driver: ROM polling case %d\n", reg); - break; - } - -} - 
-#else -static void sep_load_rom_code(struct sep_device *sep) { } -#endif /* SEP_DRIVER_ARM_DEBUG_MODE */ - - - -/*---------------------------------------- - DEFINES ------------------------------------------*/ - -#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000 -#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000 - -/*-------------------------------------------- - GLOBAL variables ---------------------------------------------*/ - -/* debug messages level */ -static int debug; -module_param(debug, int , 0); -MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages"); - -/* Keep this a single static object for now to keep the conversion easy */ - -static struct sep_device sep_instance; -static struct sep_device *sep_dev = &sep_instance; - -/* - mutex for the access to the internals of the sep driver -*/ -static DEFINE_MUTEX(sep_mutex); - - -/* wait queue head (event) of the driver */ -static DECLARE_WAIT_QUEUE_HEAD(sep_event); - -/** - * sep_load_firmware - copy firmware cache/resident - * @sep: device we are loading - * - * This functions copies the cache and resident from their source - * location into destination shared memory. - */ - -static int sep_load_firmware(struct sep_device *sep) -{ - const struct firmware *fw; - char *cache_name = "sep/cache.image.bin"; - char *res_name = "sep/resident.image.bin"; - int error; - - edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr); - edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus); - - /* load cache */ - error = request_firmware(&fw, cache_name, &sep->pdev->dev); - if (error) { - edbg("SEP Driver:cant request cache fw\n"); - return error; - } - edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data); - - memcpy(sep->rar_addr, (void *)fw->data, fw->size); - sep->cache_size = fw->size; - release_firmware(fw); - - sep->resident_bus = sep->rar_bus + sep->cache_size; - sep->resident_addr = sep->rar_addr + sep->cache_size; - - /* load resident */ - error = request_firmware(&fw, res_name, &sep->pdev->dev); - if (error) { - edbg("SEP Driver:cant request res fw\n"); - return error; - } - edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data); - - memcpy(sep->resident_addr, (void *) fw->data, fw->size); - sep->resident_size = fw->size; - release_firmware(fw); - - edbg("sep: resident v %p b %08llx cache v %p b %08llx\n", - sep->resident_addr, (unsigned long long)sep->resident_bus, - sep->rar_addr, (unsigned long long)sep->rar_bus); - return 0; -} - -MODULE_FIRMWARE("sep/cache.image.bin"); -MODULE_FIRMWARE("sep/resident.image.bin"); - -/** - * sep_map_and_alloc_shared_area - allocate shared block - * @sep: security processor - * @size: size of shared area - * - * Allocate a shared buffer in host memory that can be used by both the - * kernel and also the hardware interface via DMA. - */ - -static int sep_map_and_alloc_shared_area(struct sep_device *sep, - unsigned long size) -{ - /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */ - sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size, - &sep->shared_bus, GFP_KERNEL); - - if (!sep->shared_addr) { - edbg("sep_driver :shared memory dma_alloc_coherent failed\n"); - return -ENOMEM; - } - /* set the bus address of the shared area */ - edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n", - size, sep->shared_addr, (unsigned long long)sep->shared_bus); - return 0; -} - -/** - * sep_unmap_and_free_shared_area - free shared block - * @sep: security processor - * - * Free the shared area allocated to the security processor. 
The - * processor must have finished with this and any final posted - * writes cleared before we do so. - */ -static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size) -{ - dma_free_coherent(&sep->pdev->dev, size, - sep->shared_addr, sep->shared_bus); -} - -/** - * sep_shared_virt_to_bus - convert bus/virt addresses - * - * Returns the bus address inside the shared area according - * to the virtual address. - */ - -static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep, - void *virt_address) -{ - dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr); - edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa, - virt_address); - return pa; -} - -/** - * sep_shared_bus_to_virt - convert bus/virt addresses - * - * Returns virtual address inside the shared area according - * to the bus address. - */ - -static void *sep_shared_bus_to_virt(struct sep_device *sep, - dma_addr_t bus_address) -{ - return sep->shared_addr + (bus_address - sep->shared_bus); -} - - -/** - * sep_try_open - attempt to open a SEP device - * @sep: device to attempt to open - * - * Atomically attempt to get ownership of a SEP device. - * Returns 1 if the device was opened, 0 on failure. - */ - -static int sep_try_open(struct sep_device *sep) -{ - if (!test_and_set_bit(0, &sep->in_use)) - return 1; - return 0; -} - -/** - * sep_open - device open method - * @inode: inode of sep device - * @filp: file handle to sep device - * - * Open method for the SEP device. Called when userspace opens - * the SEP device node. Must also release the memory data pool - * allocations. - * - * Returns zero on success otherwise an error code. - */ - -static int sep_open(struct inode *inode, struct file *filp) -{ - if (sep_dev == NULL) - return -ENODEV; - - /* check the blocking mode */ - if (filp->f_flags & O_NDELAY) { - if (sep_try_open(sep_dev) == 0) - return -EAGAIN; - } else - if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0) - return -EINTR; - - /* Bind to the device, we only have one which makes it easy */ - filp->private_data = sep_dev; - /* release data pool allocations */ - sep_dev->data_pool_bytes_allocated = 0; - return 0; -} - - -/** - * sep_release - close a SEP device - * @inode: inode of SEP device - * @filp: file handle being closed - * - * Called on the final close of a SEP device. As the open protects against - * multiple simultaenous opens that means this method is called when the - * final reference to the open handle is dropped. 
- */ - -static int sep_release(struct inode *inode, struct file *filp) -{ - struct sep_device *sep = filp->private_data; -#if 0 /*!SEP_DRIVER_POLLING_MODE */ - /* close IMR */ - sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF); - /* release IRQ line */ - free_irq(SEP_DIRVER_IRQ_NUM, sep); - -#endif - /* Ensure any blocked open progresses */ - clear_bit(0, &sep->in_use); - wake_up(&sep_event); - return 0; -} - -/*--------------------------------------------------------------- - map function - this functions maps the message shared area ------------------------------------------------------------------*/ -static int sep_mmap(struct file *filp, struct vm_area_struct *vma) -{ - dma_addr_t bus_addr; - struct sep_device *sep = filp->private_data; - - dbg("-------->SEP Driver: mmap start\n"); - - /* check that the size of the mapped range is as the size of the message - shared area */ - if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { - edbg("SEP Driver mmap requested size is more than allowed\n"); - printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n"); - printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end); - printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start); - return -EAGAIN; - } - - edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr); - - /* get bus address */ - bus_addr = sep->shared_bus; - - edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr); - - if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { - edbg("SEP Driver remap_page_range failed\n"); - printk(KERN_WARNING "SEP Driver remap_page_range failed\n"); - return -EAGAIN; - } - - dbg("SEP Driver:<-------- mmap end\n"); - - return 0; -} - - -/*----------------------------------------------- - poll function -*----------------------------------------------*/ -static unsigned int sep_poll(struct file *filp, poll_table * wait) -{ - unsigned long count; - unsigned int mask = 0; - unsigned long retval = 0; /* flow id */ - struct sep_device *sep = filp->private_data; - - dbg("---------->SEP Driver poll: start\n"); - - -#if SEP_DRIVER_POLLING_MODE - - while (sep->send_ct != (retval & 0x7FFFFFFF)) { - retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); - - for (count = 0; count < 10 * 4; count += 4) - edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count))); - } - - sep->reply_ct++; -#else - /* add the event to the polling wait table */ - poll_wait(filp, &sep_event, wait); - -#endif - - edbg("sep->send_ct is %lu\n", sep->send_ct); - edbg("sep->reply_ct is %lu\n", sep->reply_ct); - - /* check if the data is ready */ - if (sep->send_ct == sep->reply_ct) { - for (count = 0; count < 12 * 4; count += 4) - edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count))); - - for (count = 0; count < 10 * 4; count += 4) - edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count))); - - retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); - edbg("retval is %lu\n", retval); - /* check if the this is sep reply or request */ - if (retval >> 31) { - edbg("SEP Driver: sep request in\n"); - /* request */ - mask |= POLLOUT | POLLWRNORM; - } else { - edbg("SEP Driver: sep reply in\n"); - mask |= POLLIN | POLLRDNORM; - } - } - dbg("SEP Driver:<-------- poll exit\n"); - return mask; -} - 
-/** - * sep_time_address - address in SEP memory of time - * @sep: SEP device we want the address from - * - * Return the address of the two dwords in memory used for time - * setting. - */ - -static u32 *sep_time_address(struct sep_device *sep) -{ - return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES; -} - -/** - * sep_set_time - set the SEP time - * @sep: the SEP we are setting the time for - * - * Calculates time and sets it at the predefined address. - * Called with the sep mutex held. - */ -static unsigned long sep_set_time(struct sep_device *sep) -{ - struct timeval time; - u32 *time_addr; /* address of time as seen by the kernel */ - - - dbg("sep:sep_set_time start\n"); - - do_gettimeofday(&time); - - /* set value in the SYSTEM MEMORY offset */ - time_addr = sep_time_address(sep); - - time_addr[0] = SEP_TIME_VAL_TOKEN; - time_addr[1] = time.tv_sec; - - edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec); - edbg("SEP Driver:time_addr is %p\n", time_addr); - edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr); - - return time.tv_sec; -} - -/** - * sep_dump_message - dump the message that is pending - * @sep: sep device - * - * Dump out the message pending in the shared message area - */ - -static void sep_dump_message(struct sep_device *sep) -{ - int count; - for (count = 0; count < 12 * 4; count += 4) - edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count))); -} - -/** - * sep_send_command_handler - kick off a command - * @sep: sep being signalled - * - * This function raises interrupt to SEP that signals that is has a new - * command from the host - */ - -static void sep_send_command_handler(struct sep_device *sep) -{ - dbg("sep:sep_send_command_handler start\n"); - - mutex_lock(&sep_mutex); - sep_set_time(sep); - - /* FIXME: flush cache */ - flush_cache_all(); - - sep_dump_message(sep); - /* update counter */ - sep->send_ct++; - /* send interrupt to SEP */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); - dbg("SEP Driver:<-------- sep_send_command_handler end\n"); - mutex_unlock(&sep_mutex); - return; -} - -/** - * sep_send_reply_command_handler - kick off a command reply - * @sep: sep being signalled - * - * This function raises interrupt to SEP that signals that is has a new - * command from the host - */ - -static void sep_send_reply_command_handler(struct sep_device *sep) -{ - dbg("sep:sep_send_reply_command_handler start\n"); - - /* flash cache */ - flush_cache_all(); - - sep_dump_message(sep); - - mutex_lock(&sep_mutex); - sep->send_ct++; /* update counter */ - /* send the interrupt to SEP */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct); - /* update both counters */ - sep->send_ct++; - sep->reply_ct++; - mutex_unlock(&sep_mutex); - dbg("sep: sep_send_reply_command_handler end\n"); -} - -/* - This function handles the allocate data pool memory request - This function returns calculates the bus address of the - allocated memory, and the offset of this area from the mapped address. 
- Therefore, the FVOs in user space can calculate the exact virtual - address of this allocated memory -*/ -static int sep_allocate_data_pool_memory_handler(struct sep_device *sep, - unsigned long arg) -{ - int error; - struct sep_driver_alloc_t command_args; - - dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n"); - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - /* allocate memory */ - if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { - error = -ENOMEM; - goto end_function; - } - - /* set the virtual and bus address */ - command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated; - command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated; - - /* write the memory back to the user space */ - error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - /* set the allocation */ - sep->data_pool_bytes_allocated += command_args.num_bytes; - -end_function: - dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n"); - return error; -} - -/* - This function handles write into allocated data pool command -*/ -static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg) -{ - int error; - void *virt_address; - unsigned long va; - unsigned long app_in_address; - unsigned long num_bytes; - void *data_pool_area_addr; - - dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n"); - - /* get the application address */ - error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address)); - if (error) - goto end_function; - - /* get the virtual kernel address address */ - error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address)); - if (error) - goto end_function; - virt_address = (void *)va; - - /* get the number of bytes */ - error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes)); - if (error) - goto end_function; - - /* calculate the start of the data pool */ - data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES; - - - /* check that the range of the virtual kernel address is correct */ - if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) { - error = -EINVAL; - goto end_function; - } - /* copy the application data */ - error = copy_from_user(virt_address, (void *) app_in_address, num_bytes); - if (error) - error = -EFAULT; -end_function: - dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n"); - return error; -} - -/* - this function handles the read from data pool command -*/ -static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg) -{ - int error; - /* virtual address of dest application buffer */ - unsigned long app_out_address; - /* virtual address of the data pool */ - unsigned long va; - void *virt_address; - unsigned long num_bytes; - void *data_pool_area_addr; - - dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n"); - - /* get the application address */ - error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address)); - if (error) - goto end_function; - - /* get the virtual kernel address address */ - error = 
get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address)); - if (error) - goto end_function; - virt_address = (void *)va; - - /* get the number of bytes */ - error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes)); - if (error) - goto end_function; - - /* calculate the start of the data pool */ - data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES; - - /* FIXME: These are incomplete all over the driver: what about + len - and when doing that also overflows */ - /* check that the range of the virtual kernel address is correct */ - if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { - error = -EINVAL; - goto end_function; - } - - /* copy the application data */ - error = copy_to_user((void *) app_out_address, virt_address, num_bytes); - if (error) - error = -EFAULT; -end_function: - dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n"); - return error; -} - -/* - This function releases all the application virtual buffer physical pages, - that were previously locked -*/ -static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag) -{ - unsigned long count; - - if (dirtyFlag) { - for (count = 0; count < num_pages; count++) { - /* the out array was written, therefore the data was changed */ - if (!PageReserved(page_array_ptr[count])) - SetPageDirty(page_array_ptr[count]); - page_cache_release(page_array_ptr[count]); - } - } else { - /* free in pages - the data was only read, therefore no update was done - on those pages */ - for (count = 0; count < num_pages; count++) - page_cache_release(page_array_ptr[count]); - } - - if (page_array_ptr) - /* free the array */ - kfree(page_array_ptr); - - return 0; -} - -/* - This function locks all the physical pages of the kernel virtual buffer - and construct a basic lli array, where each entry holds the physical - page address and the size that application data holds in this physical pages -*/ -static int sep_lock_kernel_pages(struct sep_device *sep, - unsigned long kernel_virt_addr, - unsigned long data_size, - unsigned long *num_pages_ptr, - struct sep_lli_entry_t **lli_array_ptr, - struct page ***page_array_ptr) -{ - int error = 0; - /* the the page of the end address of the user space buffer */ - unsigned long end_page; - /* the page of the start address of the user space buffer */ - unsigned long start_page; - /* the range in pages */ - unsigned long num_pages; - struct sep_lli_entry_t *lli_array; - /* next kernel address to map */ - unsigned long next_kernel_address; - unsigned long count; - - dbg("SEP Driver:--------> sep_lock_kernel_pages start\n"); - - /* set start and end pages and num pages */ - end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT; - start_page = kernel_virt_addr >> PAGE_SHIFT; - num_pages = end_page - start_page + 1; - - edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr); - edbg("SEP Driver: data_size is %lu\n", data_size); - edbg("SEP Driver: start_page is %lx\n", start_page); - edbg("SEP Driver: end_page is %lx\n", end_page); - edbg("SEP Driver: num_pages is %lu\n", num_pages); - - lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC); - if (!lli_array) { - edbg("SEP Driver: kmalloc for lli_array failed\n"); - error = -ENOMEM; - goto end_function; - } - - /* set the start address of the first page - app data may start not at - the beginning of the page */ - 
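/*
 * Editor's sketch, not part of the removed driver code: the FIXME above
 * notes that the data-pool range checks validate only the start address,
 * not start + length, and that the addition itself can wrap.  One
 * conventional way to write the full check is sketched below; the function
 * name and parameters are illustrative, with base/pool_size describing the
 * pool and addr/len the candidate range.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

bool ex_range_in_pool(uintptr_t base, size_t pool_size, uintptr_t addr, size_t len)
{
	if (addr < base || len > pool_size)
		return false;
	/*
	 * Equivalent to addr + len <= base + pool_size, but phrased as
	 * subtractions so nothing can overflow.
	 */
	return addr - base <= pool_size - len;
}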
lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr); - - /* check that not all the data is in the first page only */ - if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size) - lli_array[0].block_size = data_size; - else - lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK)); - - /* debug print */ - dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size); - - /* advance the address to the start of the next page */ - next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE; - - /* go from the second page to the prev before last */ - for (count = 1; count < (num_pages - 1); count++) { - lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address); - lli_array[count].block_size = PAGE_SIZE; - - edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); - next_kernel_address += PAGE_SIZE; - } - - /* if more then 1 pages locked - then update for the last page size needed */ - if (num_pages > 1) { - /* update the address of the last page */ - lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address); - - /* set the size of the last page */ - lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK); - - if (lli_array[count].block_size == 0) { - dbg("app_virt_addr is %08lx\n", kernel_virt_addr); - dbg("data_size is %lu\n", data_size); - while (1); - } - - edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); - } - /* set output params */ - *lli_array_ptr = lli_array; - *num_pages_ptr = num_pages; - *page_array_ptr = 0; -end_function: - dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n"); - return 0; -} - -/* - This function locks all the physical pages of the application virtual buffer - and construct a basic lli array, where each entry holds the physical page - address and the size that application data holds in this physical pages -*/ -static int sep_lock_user_pages(struct sep_device *sep, - unsigned long app_virt_addr, - unsigned long data_size, - unsigned long *num_pages_ptr, - struct sep_lli_entry_t **lli_array_ptr, - struct page ***page_array_ptr) -{ - int error = 0; - /* the the page of the end address of the user space buffer */ - unsigned long end_page; - /* the page of the start address of the user space buffer */ - unsigned long start_page; - /* the range in pages */ - unsigned long num_pages; - struct page **page_array; - struct sep_lli_entry_t *lli_array; - unsigned long count; - int result; - - dbg("SEP Driver:--------> sep_lock_user_pages start\n"); - - /* set start and end pages and num pages */ - end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT; - start_page = app_virt_addr >> PAGE_SHIFT; - num_pages = end_page - start_page + 1; - - edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr); - edbg("SEP Driver: data_size is %lu\n", data_size); - edbg("SEP Driver: start_page is %lu\n", start_page); - edbg("SEP Driver: end_page is %lu\n", end_page); - edbg("SEP Driver: num_pages is %lu\n", num_pages); - - /* allocate array of pages structure pointers */ - page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC); - if (!page_array) { - edbg("SEP Driver: kmalloc for page_array 
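/*
 * Editor's note and sketch, not part of the removed driver code: the
 * page-locking helpers here split a possibly unaligned buffer into
 * per-page chunks -- the number of pages spanned, the bytes that land in
 * the first page, and the bytes that land in the last page.  Note that the
 * original appears to spin forever ("while (1);") when the last chunk
 * computes to zero bytes, i.e. when the buffer ends exactly on a page
 * boundary; the arithmetic below treats that case as a full last page
 * instead.  The 4096-byte page size and all names are illustrative.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u

int main(void)
{
	uintptr_t addr = 0x1000f00;	/* example buffer start (not page aligned) */
	size_t len = 10000;		/* example buffer length */

	uintptr_t start_page = addr / EX_PAGE_SIZE;
	uintptr_t end_page = (addr + len - 1) / EX_PAGE_SIZE;
	size_t num_pages = end_page - start_page + 1;

	size_t in_first = EX_PAGE_SIZE - (addr % EX_PAGE_SIZE);
	if (in_first > len)
		in_first = len;			/* whole buffer fits in one page */

	size_t in_last = (addr + len) % EX_PAGE_SIZE;
	if (num_pages > 1 && in_last == 0)
		in_last = EX_PAGE_SIZE;		/* buffer ends on a page boundary */

	/* here: 256 + 2 * 4096 + 1552 == 10000, matching len */
	printf("pages=%zu first=%zu last=%zu\n", num_pages, in_first, in_last);
	return 0;
}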
failed\n"); - - error = -ENOMEM; - goto end_function; - } - - lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC); - if (!lli_array) { - edbg("SEP Driver: kmalloc for lli_array failed\n"); - - error = -ENOMEM; - goto end_function_with_error1; - } - - /* convert the application virtual address into a set of physical */ - down_read(¤t->mm->mmap_sem); - result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0); - up_read(¤t->mm->mmap_sem); - - /* check the number of pages locked - if not all then exit with error */ - if (result != num_pages) { - dbg("SEP Driver: not all pages locked by get_user_pages\n"); - - error = -ENOMEM; - goto end_function_with_error2; - } - - /* flush the cache */ - for (count = 0; count < num_pages; count++) - flush_dcache_page(page_array[count]); - - /* set the start address of the first page - app data may start not at - the beginning of the page */ - lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK)); - - /* check that not all the data is in the first page only */ - if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size) - lli_array[0].block_size = data_size; - else - lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK)); - - /* debug print */ - dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size); - - /* go from the second page to the prev before last */ - for (count = 1; count < (num_pages - 1); count++) { - lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]); - lli_array[count].block_size = PAGE_SIZE; - - edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); - } - - /* if more then 1 pages locked - then update for the last page size needed */ - if (num_pages > 1) { - /* update the address of the last page */ - lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]); - - /* set the size of the last page */ - lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK); - - if (lli_array[count].block_size == 0) { - dbg("app_virt_addr is %08lx\n", app_virt_addr); - dbg("data_size is %lu\n", data_size); - while (1); - } - edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", - count, lli_array[count].physical_address, - count, lli_array[count].block_size); - } - - /* set output params */ - *lli_array_ptr = lli_array; - *num_pages_ptr = num_pages; - *page_array_ptr = page_array; - goto end_function; - -end_function_with_error2: - /* release the cache */ - for (count = 0; count < num_pages; count++) - page_cache_release(page_array[count]); - kfree(lli_array); -end_function_with_error1: - kfree(page_array); -end_function: - dbg("SEP Driver:<-------- sep_lock_user_pages end\n"); - return 0; -} - - -/* - this function calculates the size of data that can be inserted into the lli - table from this array the condition is that either the table is full - (all etnries are entered), or there are no more entries in the lli array -*/ -static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries) -{ - unsigned long table_data_size = 0; - unsigned long counter; - - /* calculate the data in the out lli table if till we fill the whole - table or till the data has ended */ - for (counter = 
0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++) - table_data_size += lli_in_array_ptr[counter].block_size; - return table_data_size; -} - -/* - this functions builds ont lli table from the lli_array according to - the given size of data -*/ -static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size) -{ - unsigned long curr_table_data_size; - /* counter of lli array entry */ - unsigned long array_counter; - - dbg("SEP Driver:--------> sep_build_lli_table start\n"); - - /* init currrent table data size and lli array entry counter */ - curr_table_data_size = 0; - array_counter = 0; - *num_table_entries_ptr = 1; - - edbg("SEP Driver:table_data_size is %lu\n", table_data_size); - - /* fill the table till table size reaches the needed amount */ - while (curr_table_data_size < table_data_size) { - /* update the number of entries in table */ - (*num_table_entries_ptr)++; - - lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address; - lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size; - curr_table_data_size += lli_table_ptr->block_size; - - edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr); - edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address); - edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size); - - /* check for overflow of the table data */ - if (curr_table_data_size > table_data_size) { - edbg("SEP Driver:curr_table_data_size > table_data_size\n"); - - /* update the size of block in the table */ - lli_table_ptr->block_size -= (curr_table_data_size - table_data_size); - - /* update the physical address in the lli array */ - lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size; - - /* update the block size left in the lli array */ - lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size); - } else - /* advance to the next entry in the lli_array */ - array_counter++; - - edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address); - edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size); - - /* move to the next entry in table */ - lli_table_ptr++; - } - - /* set the info entry to default */ - lli_table_ptr->physical_address = 0xffffffff; - lli_table_ptr->block_size = 0; - - edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr); - edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address); - edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size); - - /* set the output parameter */ - *num_processed_entries_ptr += array_counter; - - edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr); - dbg("SEP Driver:<-------- sep_build_lli_table end\n"); - return; -} - -/* - this function goes over the list of the print created tables and - prints all the data -*/ -static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size) -{ - unsigned long table_count; - unsigned long entries_count; - - dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n"); - - table_count = 1; - while ((unsigned long) lli_table_ptr != 0xffffffff) { - edbg("SEP 
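/*
 * Editor's sketch, not part of the removed driver code: when
 * sep_build_lli_table() above overshoots the requested table size, it
 * trims the current entry to fit and leaves the remainder -- address
 * advanced, size reduced -- in the source array for the next table to
 * pick up.  The same split in isolation; the struct and function names are
 * illustrative.
 */
#include <stdint.h>

struct ex_entry {
	uint32_t addr;		/* start of the chunk */
	uint32_t size;		/* bytes remaining in the chunk */
};

/* consume up to 'room' bytes from *src; returns the bytes actually taken */
uint32_t ex_take_from_entry(struct ex_entry *src, uint32_t room)
{
	uint32_t taken = src->size < room ? src->size : room;

	src->addr += taken;	/* the remainder starts where this table stopped */
	src->size -= taken;
	return taken;
}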
Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size); - edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries); - - /* print entries of the table (without info entry) */ - for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) { - edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr); - edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size); - } - - /* point to the info entry */ - lli_table_ptr--; - - edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size); - edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address); - - - table_data_size = lli_table_ptr->block_size & 0xffffff; - num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff; - lli_table_ptr = (struct sep_lli_entry_t *) - (lli_table_ptr->physical_address); - - edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr); - - if ((unsigned long) lli_table_ptr != 0xffffffff) - lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr); - - table_count++; - } - dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n"); -} - - -/* - This function prepares only input DMA table for synhronic symmetric - operations (HASH) -*/ -static int sep_prepare_input_dma_table(struct sep_device *sep, - unsigned long app_virt_addr, - unsigned long data_size, - unsigned long block_size, - unsigned long *lli_table_ptr, - unsigned long *num_entries_ptr, - unsigned long *table_data_size_ptr, - bool isKernelVirtualAddress) -{ - /* pointer to the info entry of the table - the last entry */ - struct sep_lli_entry_t *info_entry_ptr; - /* array of pointers ot page */ - struct sep_lli_entry_t *lli_array_ptr; - /* points to the first entry to be processed in the lli_in_array */ - unsigned long current_entry; - /* num entries in the virtual buffer */ - unsigned long sep_lli_entries; - /* lli table pointer */ - struct sep_lli_entry_t *in_lli_table_ptr; - /* the total data in one table */ - unsigned long table_data_size; - /* number of entries in lli table */ - unsigned long num_entries_in_table; - /* next table address */ - void *lli_table_alloc_addr; - unsigned long result; - - dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n"); - - edbg("SEP Driver:data_size is %lu\n", data_size); - edbg("SEP Driver:block_size is %lu\n", block_size); - - /* initialize the pages pointers */ - sep->in_page_array = 0; - sep->in_num_pages = 0; - - if (data_size == 0) { - /* special case - created 2 entries table with zero data */ - in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES); - /* FIXME: Should the entry below not be for _bus */ - in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES; - in_lli_table_ptr->block_size = 0; - - in_lli_table_ptr++; - in_lli_table_ptr->physical_address = 0xFFFFFFFF; - in_lli_table_ptr->block_size = 0; - - *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES; - *num_entries_ptr = 2; - *table_data_size_ptr = 0; - - goto end_function; - } - - /* check if the pages are in Kernel Virtual Address layout */ - if (isKernelVirtualAddress == true) - /* lock the pages of the kernel 
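/*
 * Editor's sketch, not part of the removed driver code: the debug walker
 * above unpacks a table's "info" entry with ">> 24" and "& 0xffffff", and
 * the table builders further down pack it the same way -- the top 8 bits
 * of block_size carry the next table's entry count and the low 24 bits its
 * data size.  Minimal pack/unpack helpers for that format (the names are
 * illustrative):
 */
#include <stdint.h>

static inline uint32_t ex_info_pack(uint32_t num_entries, uint32_t data_size)
{
	return (num_entries << 24) | (data_size & 0xffffff);
}

static inline uint32_t ex_info_entries(uint32_t packed)
{
	return (packed >> 24) & 0xff;
}

static inline uint32_t ex_info_data_size(uint32_t packed)
{
	return packed & 0xffffff;
}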
buffer and translate them to pages */ - result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array); - else - /* lock the pages of the user buffer and translate them to pages */ - result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array); - - if (result) - return result; - - edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages); - - current_entry = 0; - info_entry_ptr = 0; - sep_lli_entries = sep->in_num_pages; - - /* initiate to point after the message area */ - lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES; - - /* loop till all the entries in in array are not processed */ - while (current_entry < sep_lli_entries) { - /* set the new input and output tables */ - in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr; - - lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; - - /* calculate the maximum size of data for input table */ - table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry)); - - /* now calculate the table size so that it will be module block size */ - table_data_size = (table_data_size / block_size) * block_size; - - edbg("SEP Driver:output table_data_size is %lu\n", table_data_size); - - /* construct input lli table */ - sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, ¤t_entry, &num_entries_in_table, table_data_size); - - if (info_entry_ptr == 0) { - /* set the output parameters to physical addresses */ - *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr); - *num_entries_ptr = num_entries_in_table; - *table_data_size_ptr = table_data_size; - - edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr); - } else { - /* update the info entry of the previous in table */ - info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr); - info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size); - } - - /* save the pointer to the info entry of the current tables */ - info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1; - } - - /* print input tables */ - sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *) - sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr); - - /* the array of the pages */ - kfree(lli_array_ptr); -end_function: - dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n"); - return 0; - -} - -/* - This function creates the input and output dma tables for - symmetric operations (AES/DES) according to the block size from LLI arays -*/ -static int sep_construct_dma_tables_from_lli(struct sep_device *sep, - struct sep_lli_entry_t *lli_in_array, - unsigned long sep_in_lli_entries, - struct sep_lli_entry_t *lli_out_array, - unsigned long sep_out_lli_entries, - unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr) -{ - /* points to the area where next lli table can be allocated: keep void * - as there is pointer scaling to fix otherwise */ - void *lli_table_alloc_addr; - /* input lli table */ - struct sep_lli_entry_t *in_lli_table_ptr; - /* output lli table */ - struct sep_lli_entry_t *out_lli_table_ptr; - /* pointer to the info entry of the table - the last entry */ - struct sep_lli_entry_t 
*info_in_entry_ptr; - /* pointer to the info entry of the table - the last entry */ - struct sep_lli_entry_t *info_out_entry_ptr; - /* points to the first entry to be processed in the lli_in_array */ - unsigned long current_in_entry; - /* points to the first entry to be processed in the lli_out_array */ - unsigned long current_out_entry; - /* max size of the input table */ - unsigned long in_table_data_size; - /* max size of the output table */ - unsigned long out_table_data_size; - /* flag te signifies if this is the first tables build from the arrays */ - unsigned long first_table_flag; - /* the data size that should be in table */ - unsigned long table_data_size; - /* number of etnries in the input table */ - unsigned long num_entries_in_table; - /* number of etnries in the output table */ - unsigned long num_entries_out_table; - - dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n"); - - /* initiate to pint after the message area */ - lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES; - - current_in_entry = 0; - current_out_entry = 0; - first_table_flag = 1; - info_in_entry_ptr = 0; - info_out_entry_ptr = 0; - - /* loop till all the entries in in array are not processed */ - while (current_in_entry < sep_in_lli_entries) { - /* set the new input and output tables */ - in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr; - - lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; - - /* set the first output tables */ - out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr; - - lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; - - /* calculate the maximum size of data for input table */ - in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry)); - - /* calculate the maximum size of data for output table */ - out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry)); - - edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size); - edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size); - - /* check where the data is smallest */ - table_data_size = in_table_data_size; - if (table_data_size > out_table_data_size) - table_data_size = out_table_data_size; - - /* now calculate the table size so that it will be module block size */ - table_data_size = (table_data_size / block_size) * block_size; - - dbg("SEP Driver:table_data_size is %lu\n", table_data_size); - - /* construct input lli table */ - sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, ¤t_in_entry, &num_entries_in_table, table_data_size); - - /* construct output lli table */ - sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, ¤t_out_entry, &num_entries_out_table, table_data_size); - - /* if info entry is null - this is the first table built */ - if (info_in_entry_ptr == 0) { - /* set the output parameters to physical addresses */ - *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr); - *in_num_entries_ptr = num_entries_in_table; - *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr); - *out_num_entries_ptr = num_entries_out_table; - *table_data_size_ptr = table_data_size; - - edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr); - edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr); - 
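/*
 * Editor's sketch, not part of the removed driver code: each pass of the
 * table-building loop above takes the smaller of what the input and output
 * LLI arrays can still supply and rounds it down to a whole number of
 * cipher blocks, so both tables always describe the same, block-aligned
 * amount of data.  The same computation in isolation (the function name is
 * illustrative):
 */
#include <stdint.h>

uint32_t ex_table_pass_size(uint32_t in_avail, uint32_t out_avail, uint32_t block_size)
{
	uint32_t size = in_avail < out_avail ? in_avail : out_avail;

	return (size / block_size) * block_size;	/* round down to a block multiple */
}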
} else { - /* update the info entry of the previous in table */ - info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr); - info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size); - - /* update the info entry of the previous in table */ - info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr); - info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size); - } - - /* save the pointer to the info entry of the current tables */ - info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1; - info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1; - - edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table); - edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr); - edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr); - } - - /* print input tables */ - sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *) - sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr); - /* print output tables */ - sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *) - sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr); - dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n"); - return 0; -} - - -/* - This function builds input and output DMA tables for synhronic - symmetric operations (AES, DES). It also checks that each table - is of the modular block size -*/ -static int sep_prepare_input_output_dma_table(struct sep_device *sep, - unsigned long app_virt_in_addr, - unsigned long app_virt_out_addr, - unsigned long data_size, - unsigned long block_size, - unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress) -{ - /* array of pointers of page */ - struct sep_lli_entry_t *lli_in_array; - /* array of pointers of page */ - struct sep_lli_entry_t *lli_out_array; - int result = 0; - - dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n"); - - /* initialize the pages pointers */ - sep->in_page_array = 0; - sep->out_page_array = 0; - - /* check if the pages are in Kernel Virtual Address layout */ - if (isKernelVirtualAddress == true) { - /* lock the pages of the kernel buffer and translate them to pages */ - result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array); - if (result) { - edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n"); - goto end_function; - } - } else { - /* lock the pages of the user buffer and translate them to pages */ - result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array); - if (result) { - edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n"); - goto end_function; - } - } - - if (isKernelVirtualAddress == true) { - result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array); - if (result) { - edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n"); - goto end_function_with_error1; - } - } else { - result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array); - if (result) { - edbg("SEP 
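/*
 * Editor's sketch, not part of the removed driver code: the "} else {"
 * branch above links each newly built table into a chain by rewriting the
 * previous table's info entry -- its address field is pointed at the new
 * table and its block_size is repacked with the new table's entry count
 * and size.  A walk over such a chain, using ordinary pointers in place of
 * the driver's bus addresses and 0xffffffff terminator, might look like
 * this (the types and names are illustrative):
 */
#include <stddef.h>
#include <stdint.h>

struct ex_table_info {
	struct ex_table_info *next;	/* stands in for physical_address */
	uint32_t packed;		/* (entry count << 24) | data size */
};

size_t ex_count_chained_tables(const struct ex_table_info *info)
{
	size_t n = 0;

	while (info) {			/* NULL stands in for the 0xffffffff terminator */
		n++;
		info = info->next;
	}
	return n;
}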
Driver: sep_lock_user_pages for output virtual buffer failed\n"); - goto end_function_with_error1; - } - } - edbg("sep->in_num_pages is %lu\n", sep->in_num_pages); - edbg("sep->out_num_pages is %lu\n", sep->out_num_pages); - edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); - - - /* call the fucntion that creates table from the lli arrays */ - result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr); - if (result) { - edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n"); - goto end_function_with_error2; - } - - /* fall through - free the lli entry arrays */ - dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr); - dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr); - dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr); -end_function_with_error2: - kfree(lli_out_array); -end_function_with_error1: - kfree(lli_in_array); -end_function: - dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result); - return result; - -} - -/* - this function handles tha request for creation of the DMA table - for the synchronic symmetric operations (AES,DES) -*/ -static int sep_create_sync_dma_tables_handler(struct sep_device *sep, - unsigned long arg) -{ - int error; - /* command arguments */ - struct sep_driver_build_sync_table_t command_args; - - dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n"); - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - edbg("app_in_address is %08lx\n", command_args.app_in_address); - edbg("app_out_address is %08lx\n", command_args.app_out_address); - edbg("data_size is %lu\n", command_args.data_in_size); - edbg("block_size is %lu\n", command_args.block_size); - - /* check if we need to build only input table or input/output */ - if (command_args.app_out_address) - /* prepare input and output tables */ - error = sep_prepare_input_output_dma_table(sep, - command_args.app_in_address, - command_args.app_out_address, - command_args.data_in_size, - command_args.block_size, - &command_args.in_table_address, - &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress); - else - /* prepare input tables */ - error = sep_prepare_input_dma_table(sep, - command_args.app_in_address, - command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress); - - if (error) - goto end_function; - /* copy to user */ - if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t))) - error = -EFAULT; -end_function: - dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n"); - return error; -} - -/* - this function handles the request for freeing dma table for synhronic actions -*/ -static int sep_free_dma_table_data_handler(struct sep_device *sep) -{ - dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n"); - - /* free input pages array */ - sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0); - - /* free output pages array if needed */ - if (sep->out_page_array) - 
sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1); - - /* reset all the values */ - sep->in_page_array = 0; - sep->out_page_array = 0; - sep->in_num_pages = 0; - sep->out_num_pages = 0; - dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n"); - return 0; -} - -/* - this function find a space for the new flow dma table -*/ -static int sep_find_free_flow_dma_table_space(struct sep_device *sep, - unsigned long **table_address_ptr) -{ - int error = 0; - /* pointer to the id field of the flow dma table */ - unsigned long *start_table_ptr; - /* Do not make start_addr unsigned long * unless fixing the offset - computations ! */ - void *flow_dma_area_start_addr; - unsigned long *flow_dma_area_end_addr; - /* maximum table size in words */ - unsigned long table_size_in_words; - - /* find the start address of the flow DMA table area */ - flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES; - - /* set end address of the flow table area */ - flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES; - - /* set table size in words */ - table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2; - - /* set the pointer to the start address of DMA area */ - start_table_ptr = flow_dma_area_start_addr; - - /* find the space for the next table */ - while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr) - start_table_ptr += table_size_in_words; - - /* check if we reached the end of floa tables area */ - if (start_table_ptr >= flow_dma_area_end_addr) - error = -1; - else - *table_address_ptr = start_table_ptr; - - return error; -} - -/* - This function creates one DMA table for flow and returns its data, - and pointer to its info entry -*/ -static int sep_prepare_one_flow_dma_table(struct sep_device *sep, - unsigned long virt_buff_addr, - unsigned long virt_buff_size, - struct sep_lli_entry_t *table_data, - struct sep_lli_entry_t **info_entry_ptr, - struct sep_flow_context_t *flow_data_ptr, - bool isKernelVirtualAddress) -{ - int error; - /* the range in pages */ - unsigned long lli_array_size; - struct sep_lli_entry_t *lli_array; - struct sep_lli_entry_t *flow_dma_table_entry_ptr; - unsigned long *start_dma_table_ptr; - /* total table data counter */ - unsigned long dma_table_data_count; - /* pointer that will keep the pointer to the pages of the virtual buffer */ - struct page **page_array_ptr; - unsigned long entry_count; - - /* find the space for the new table */ - error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr); - if (error) - goto end_function; - - /* check if the pages are in Kernel Virtual Address layout */ - if (isKernelVirtualAddress == true) - /* lock kernel buffer in the memory */ - error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr); - else - /* lock user buffer in the memory */ - error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr); - - if (error) - goto end_function; - - /* set the pointer to page array at the beginning of table - this table is - now considered taken */ - *start_dma_table_ptr = lli_array_size; - - /* point to the place of the pages pointers of the table */ - start_dma_table_ptr++; - - /* set the pages pointer */ - *start_dma_table_ptr = (unsigned long) page_array_ptr; - - /* set the pointer to the first entry */ - flow_dma_table_entry_ptr = 
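/*
 * Editor's sketch, not part of the removed driver code:
 * sep_find_free_flow_dma_table_space() above treats the flow-table region
 * as an array of fixed-size slots and takes the first one whose leading
 * word is zero (ignoring the top bit).  The same search in isolation, with
 * the region and slot sizes reduced to illustrative constants:
 */
#include <stddef.h>
#include <stdint.h>

#define EX_SLOT_WORDS	16			/* stand-in for the per-table word count */
#define EX_AREA_WORDS	(EX_SLOT_WORDS * 8)	/* stand-in for the whole region */

/* returns the first free slot, or NULL if the region is full */
uint32_t *ex_find_free_slot(uint32_t *area)
{
	uint32_t *end = area + EX_AREA_WORDS;
	uint32_t *slot;

	for (slot = area; slot < end; slot += EX_SLOT_WORDS)
		if ((*slot & 0x7FFFFFFF) == 0)
			return slot;
	return NULL;
}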
(struct sep_lli_entry_t *) (++start_dma_table_ptr); - - /* now create the entries for table */ - for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) { - flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address; - - flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size; - - /* set the total data of a table */ - dma_table_data_count += lli_array[entry_count].block_size; - - flow_dma_table_entry_ptr++; - } - - /* set the physical address */ - table_data->physical_address = virt_to_phys(start_dma_table_ptr); - - /* set the num_entries and total data size */ - table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count); - - /* set the info entry */ - flow_dma_table_entry_ptr->physical_address = 0xffffffff; - flow_dma_table_entry_ptr->block_size = 0; - - /* set the pointer to info entry */ - *info_entry_ptr = flow_dma_table_entry_ptr; - - /* the array of the lli entries */ - kfree(lli_array); -end_function: - return error; -} - - - -/* - This function creates a list of tables for flow and returns the data for - the first and last tables of the list -*/ -static int sep_prepare_flow_dma_tables(struct sep_device *sep, - unsigned long num_virtual_buffers, - unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress) -{ - int error; - unsigned long virt_buff_addr; - unsigned long virt_buff_size; - struct sep_lli_entry_t table_data; - struct sep_lli_entry_t *info_entry_ptr; - struct sep_lli_entry_t *prev_info_entry_ptr; - unsigned long i; - - /* init vars */ - error = 0; - prev_info_entry_ptr = 0; - - /* init the first table to default */ - table_data.physical_address = 0xffffffff; - first_table_data_ptr->physical_address = 0xffffffff; - table_data.block_size = 0; - - for (i = 0; i < num_virtual_buffers; i++) { - /* get the virtual buffer address */ - error = get_user(virt_buff_addr, &first_buff_addr); - if (error) - goto end_function; - - /* get the virtual buffer size */ - first_buff_addr++; - error = get_user(virt_buff_size, &first_buff_addr); - if (error) - goto end_function; - - /* advance the address to point to the next pair of address|size */ - first_buff_addr++; - - /* now prepare the one flow LLI table from the data */ - error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress); - if (error) - goto end_function; - - if (i == 0) { - /* if this is the first table - save it to return to the user - application */ - *first_table_data_ptr = table_data; - - /* set the pointer to info entry */ - prev_info_entry_ptr = info_entry_ptr; - } else { - /* not first table - the previous table info entry should - be updated */ - prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size); - - /* set the pointer to info entry */ - prev_info_entry_ptr = info_entry_ptr; - } - } - - /* set the last table data */ - *last_table_data_ptr = table_data; -end_function: - return error; -} - -/* - this function goes over all the flow tables connected to the given - table and deallocate them -*/ -static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr) -{ - /* id pointer */ - unsigned long *table_ptr; - /* end address of the flow dma area */ - unsigned long num_entries; - unsigned long num_pages; - struct page 
**pages_ptr; - /* maximum table size in words */ - struct sep_lli_entry_t *info_entry_ptr; - - /* set the pointer to the first table */ - table_ptr = (unsigned long *) first_table_ptr->physical_address; - - /* set the num of entries */ - num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) - & SEP_NUM_ENTRIES_MASK; - - /* go over all the connected tables */ - while (*table_ptr != 0xffffffff) { - /* get number of pages */ - num_pages = *(table_ptr - 2); - - /* get the pointer to the pages */ - pages_ptr = (struct page **) (*(table_ptr - 1)); - - /* free the pages */ - sep_free_dma_pages(pages_ptr, num_pages, 1); - - /* goto to the info entry */ - info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1); - - table_ptr = (unsigned long *) info_entry_ptr->physical_address; - num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK; - } - - return; -} - -/** - * sep_find_flow_context - find a flow - * @sep: the SEP we are working with - * @flow_id: flow identifier - * - * Returns a pointer the matching flow, or NULL if the flow does not - * exist. - */ - -static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep, - unsigned long flow_id) -{ - int count; - /* - * always search for flow with id default first - in case we - * already started working on the flow there can be no situation - * when 2 flows are with default flag - */ - for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) { - if (sep->flows[count].flow_id == flow_id) - return &sep->flows[count]; - } - return NULL; -} - - -/* - this function handles the request to create the DMA tables for flow -*/ -static int sep_create_flow_dma_tables_handler(struct sep_device *sep, - unsigned long arg) -{ - int error = -ENOENT; - struct sep_driver_build_flow_table_t command_args; - /* first table - output */ - struct sep_lli_entry_t first_table_data; - /* dma table data */ - struct sep_lli_entry_t last_table_data; - /* pointer to the info entry of the previuos DMA table */ - struct sep_lli_entry_t *prev_info_entry_ptr; - /* pointer to the flow data strucutre */ - struct sep_flow_context_t *flow_context_ptr; - - dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n"); - - /* init variables */ - prev_info_entry_ptr = 0; - first_table_data.physical_address = 0xffffffff; - - /* find the free structure for flow data */ - error = -EINVAL; - flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID); - if (flow_context_ptr == NULL) - goto end_function; - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - /* create flow tables */ - error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress); - if (error) - goto end_function_with_error; - - /* check if flow is static */ - if (!command_args.flow_type) - /* point the info entry of the last to the info entry of the first */ - last_table_data = first_table_data; - - /* set output params */ - command_args.first_table_addr = first_table_data.physical_address; - command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK); - command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK); - - /* send the parameters to user application */ - 
error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t)); - if (error) { - error = -EFAULT; - goto end_function_with_error; - } - - /* all the flow created - update the flow entry with temp id */ - flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID; - - /* set the processing tables data in the context */ - if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG) - flow_context_ptr->input_tables_in_process = first_table_data; - else - flow_context_ptr->output_tables_in_process = first_table_data; - - goto end_function; - -end_function_with_error: - /* free the allocated tables */ - sep_deallocated_flow_tables(&first_table_data); -end_function: - dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n"); - return error; -} - -/* - this function handles add tables to flow -*/ -static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg) -{ - int error; - unsigned long num_entries; - struct sep_driver_add_flow_table_t command_args; - struct sep_flow_context_t *flow_context_ptr; - /* first dma table data */ - struct sep_lli_entry_t first_table_data; - /* last dma table data */ - struct sep_lli_entry_t last_table_data; - /* pointer to the info entry of the current DMA table */ - struct sep_lli_entry_t *info_entry_ptr; - - dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n"); - - /* get input parameters */ - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - /* find the flow structure for the flow id */ - flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id); - if (flow_context_ptr == NULL) - goto end_function; - - /* prepare the flow dma tables */ - error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress); - if (error) - goto end_function_with_error; - - /* now check if there is already an existing add table for this flow */ - if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) { - /* this buffer was for input buffers */ - if (flow_context_ptr->input_tables_flag) { - /* add table already exists - add the new tables to the end - of the previous */ - num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK; - - info_entry_ptr = (struct sep_lli_entry_t *) - (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1))); - - /* connect to list of tables */ - *info_entry_ptr = first_table_data; - - /* set the first table data */ - first_table_data = flow_context_ptr->first_input_table; - } else { - /* set the input flag */ - flow_context_ptr->input_tables_flag = 1; - - /* set the first table data */ - flow_context_ptr->first_input_table = first_table_data; - } - /* set the last table data */ - flow_context_ptr->last_input_table = last_table_data; - } else { /* this is output tables */ - - /* this buffer was for input buffers */ - if (flow_context_ptr->output_tables_flag) { - /* add table already exists - add the new tables to - the end of the previous */ - num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK; - - info_entry_ptr = (struct sep_lli_entry_t *) - (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1))); - - /* connect to 
list of tables */ - *info_entry_ptr = first_table_data; - - /* set the first table data */ - first_table_data = flow_context_ptr->first_output_table; - } else { - /* set the input flag */ - flow_context_ptr->output_tables_flag = 1; - - /* set the first table data */ - flow_context_ptr->first_output_table = first_table_data; - } - /* set the last table data */ - flow_context_ptr->last_output_table = last_table_data; - } - - /* set output params */ - command_args.first_table_addr = first_table_data.physical_address; - command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK); - command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK); - - /* send the parameters to user application */ - error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t)); - if (error) - error = -EFAULT; -end_function_with_error: - /* free the allocated tables */ - sep_deallocated_flow_tables(&first_table_data); -end_function: - dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n"); - return error; -} - -/* - this function add the flow add message to the specific flow -*/ -static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg) -{ - int error; - struct sep_driver_add_message_t command_args; - struct sep_flow_context_t *flow_context_ptr; - - dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n"); - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - /* check input */ - if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) { - error = -ENOMEM; - goto end_function; - } - - /* find the flow context */ - flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id); - if (flow_context_ptr == NULL) - goto end_function; - - /* copy the message into context */ - flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes; - error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes); - if (error) - error = -EFAULT; -end_function: - dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n"); - return error; -} - - -/* - this function returns the bus and virtual addresses of the static pool -*/ -static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg) -{ - int error; - struct sep_driver_static_pool_addr_t command_args; - - dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n"); - - /*prepare the output parameters in the struct */ - command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; - command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; - - edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address); - - /* send the parameters to user application */ - error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t)); - if (error) - error = -EFAULT; - dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n"); - return error; -} - -/* - this address gets the offset of the physical address from the start - of the mapped area -*/ -static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg) -{ - int 
error; - struct sep_driver_get_mapped_offset_t command_args; - - dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n"); - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - - if (command_args.physical_address < sep->shared_bus) { - error = -EINVAL; - goto end_function; - } - - /*prepare the output parameters in the struct */ - command_args.offset = command_args.physical_address - sep->shared_bus; - - edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset); - - /* send the parameters to user application */ - error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t)); - if (error) - error = -EFAULT; -end_function: - dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n"); - return error; -} - - -/* - ? -*/ -static int sep_start_handler(struct sep_device *sep) -{ - unsigned long reg_val; - unsigned long error = 0; - - dbg("SEP Driver:--------> sep_start_handler start\n"); - - /* wait in polling for message from SEP */ - do - reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); - while (!reg_val); - - /* check the value */ - if (reg_val == 0x1) - /* fatal error - read error status from GPRO */ - error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); - dbg("SEP Driver:<-------- sep_start_handler end\n"); - return error; -} - -/* - this function handles the request for SEP initialization -*/ -static int sep_init_handler(struct sep_device *sep, unsigned long arg) -{ - unsigned long message_word; - unsigned long *message_ptr; - struct sep_driver_init_t command_args; - unsigned long counter; - unsigned long error; - unsigned long reg_val; - - dbg("SEP Driver:--------> sep_init_handler start\n"); - error = 0; - - error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t)); - if (error) { - error = -EFAULT; - goto end_function; - } - dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n"); - - /* PATCH - configure the DMA to single -burst instead of multi-burst */ - /*sep_configure_dma_burst(); */ - - dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n"); - - message_ptr = (unsigned long *) command_args.message_addr; - - /* set the base address of the SRAM */ - sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS); - - for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) { - get_user(message_word, message_ptr); - /* write data to SRAM */ - sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word); - edbg("SEP Driver:message_word is %lu\n", message_word); - /* wait for write complete */ - sep_wait_sram_write(sep); - } - dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n"); - /* signal SEP */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1); - - do - reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); - while (!(reg_val & 0xFFFFFFFD)); - - dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n"); - - /* check the value */ - if (reg_val == 0x1) { - edbg("SEP Driver:init failed\n"); - - error = sep_read_reg(sep, 0x8060); - edbg("SEP Driver:sw monitor is %lu\n", error); - - /* fatal error - read erro status from GPRO */ - error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); - edbg("SEP Driver:error is %lu\n", error); - } -end_function: - 
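/*
 * Editor's note and sketch, not part of the removed driver code: both
 * sep_start_handler() above and sep_init_handler() here poll the SEP's
 * GPR3 register in an unbounded do/while loop, so an unresponsive device
 * would leave the ioctl spinning forever.  A bounded version of the same
 * poll is sketched below; read_status is a caller-supplied stand-in for
 * the register read, and in a real driver the loop body would also relax
 * or sleep between reads.
 */
#include <stdint.h>

/* returns the first non-zero status seen, or 0 after max_tries empty polls */
uint32_t ex_poll_status(uint32_t (*read_status)(void), unsigned long max_tries)
{
	uint32_t val = 0;

	while (max_tries-- && !(val = read_status()))
		;			/* e.g. cpu_relax()/usleep between polls */
	return val;
}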
dbg("SEP Driver:<-------- sep_init_handler end\n"); - return error; - -} - -/* - this function handles the request cache and resident reallocation -*/ -static int sep_realloc_cache_resident_handler(struct sep_device *sep, - unsigned long arg) -{ - struct sep_driver_realloc_cache_resident_t command_args; - int error; - - /* copy cache and resident to the their intended locations */ - error = sep_load_firmware(sep); - if (error) - return error; - - command_args.new_base_addr = sep->shared_bus; - - /* find the new base address according to the lowest address between - cache, resident and shared area */ - if (sep->resident_bus < command_args.new_base_addr) - command_args.new_base_addr = sep->resident_bus; - if (sep->rar_bus < command_args.new_base_addr) - command_args.new_base_addr = sep->rar_bus; - - /* set the return parameters */ - command_args.new_cache_addr = sep->rar_bus; - command_args.new_resident_addr = sep->resident_bus; - - /* set the new shared area */ - command_args.new_shared_area_addr = sep->shared_bus; - - edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr); - edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr); - edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr); - edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr); - - /* return to user */ - if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t))) - return -EFAULT; - return 0; -} - -/** - * sep_get_time_handler - time request from user space - * @sep: sep we are to set the time for - * @arg: pointer to user space arg buffer - * - * This function reports back the time and the address in the SEP - * shared buffer at which it has been placed. (Do we really need this!!!) 
- */ - -static int sep_get_time_handler(struct sep_device *sep, unsigned long arg) -{ - struct sep_driver_get_time_t command_args; - - mutex_lock(&sep_mutex); - command_args.time_value = sep_set_time(sep); - command_args.time_physical_address = (unsigned long)sep_time_address(sep); - mutex_unlock(&sep_mutex); - if (copy_to_user((void __user *)arg, - &command_args, sizeof(struct sep_driver_get_time_t))) - return -EFAULT; - return 0; - -} - -/* - This API handles the end transaction request -*/ -static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg) -{ - dbg("SEP Driver:--------> sep_end_transaction_handler start\n"); - -#if 0 /*!SEP_DRIVER_POLLING_MODE */ - /* close IMR */ - sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF); - - /* release IRQ line */ - free_irq(SEP_DIRVER_IRQ_NUM, sep); - - /* lock the sep mutex */ - mutex_unlock(&sep_mutex); -#endif - - dbg("SEP Driver:<-------- sep_end_transaction_handler end\n"); - - return 0; -} - - -/** - * sep_set_flow_id_handler - handle flow setting - * @sep: the SEP we are configuring - * @flow_id: the flow we are setting - * - * This function handler the set flow id command - */ -static int sep_set_flow_id_handler(struct sep_device *sep, - unsigned long flow_id) -{ - int error = 0; - struct sep_flow_context_t *flow_data_ptr; - - /* find the flow data structure that was just used for creating new flow - - its id should be default */ - - mutex_lock(&sep_mutex); - flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID); - if (flow_data_ptr) - flow_data_ptr->flow_id = flow_id; /* set flow id */ - else - error = -EINVAL; - mutex_unlock(&sep_mutex); - return error; -} - -static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int error = 0; - struct sep_device *sep = filp->private_data; - - dbg("------------>SEP Driver: ioctl start\n"); - - edbg("SEP Driver: cmd is %x\n", cmd); - - switch (cmd) { - case SEP_IOCSENDSEPCOMMAND: - /* send command to SEP */ - sep_send_command_handler(sep); - edbg("SEP Driver: after sep_send_command_handler\n"); - break; - case SEP_IOCSENDSEPRPLYCOMMAND: - /* send reply command to SEP */ - sep_send_reply_command_handler(sep); - break; - case SEP_IOCALLOCDATAPOLL: - /* allocate data pool */ - error = sep_allocate_data_pool_memory_handler(sep, arg); - break; - case SEP_IOCWRITEDATAPOLL: - /* write data into memory pool */ - error = sep_write_into_data_pool_handler(sep, arg); - break; - case SEP_IOCREADDATAPOLL: - /* read data from data pool into application memory */ - error = sep_read_from_data_pool_handler(sep, arg); - break; - case SEP_IOCCREATESYMDMATABLE: - /* create dma table for synhronic operation */ - error = sep_create_sync_dma_tables_handler(sep, arg); - break; - case SEP_IOCCREATEFLOWDMATABLE: - /* create flow dma tables */ - error = sep_create_flow_dma_tables_handler(sep, arg); - break; - case SEP_IOCFREEDMATABLEDATA: - /* free the pages */ - error = sep_free_dma_table_data_handler(sep); - break; - case SEP_IOCSETFLOWID: - /* set flow id */ - error = sep_set_flow_id_handler(sep, (unsigned long)arg); - break; - case SEP_IOCADDFLOWTABLE: - /* add tables to the dynamic flow */ - error = sep_add_flow_tables_handler(sep, arg); - break; - case SEP_IOCADDFLOWMESSAGE: - /* add message of add tables to flow */ - error = sep_add_flow_tables_message_handler(sep, arg); - break; - case SEP_IOCSEPSTART: - /* start command to sep */ - error = sep_start_handler(sep); - break; - case SEP_IOCSEPINIT: - /* init command to sep */ - error = sep_init_handler(sep, arg); - break; 
- case SEP_IOCGETSTATICPOOLADDR: - /* get the physical and virtual addresses of the static pool */ - error = sep_get_static_pool_addr_handler(sep, arg); - break; - case SEP_IOCENDTRANSACTION: - error = sep_end_transaction_handler(sep, arg); - break; - case SEP_IOCREALLOCCACHERES: - error = sep_realloc_cache_resident_handler(sep, arg); - break; - case SEP_IOCGETMAPPEDADDROFFSET: - error = sep_get_physical_mapped_offset_handler(sep, arg); - break; - case SEP_IOCGETIME: - error = sep_get_time_handler(sep, arg); - break; - default: - error = -ENOTTY; - break; - } - dbg("SEP Driver:<-------- ioctl end\n"); - return error; -} - - - -#if !SEP_DRIVER_POLLING_MODE - -/* handler for flow done interrupt */ - -static void sep_flow_done_handler(struct work_struct *work) -{ - struct sep_flow_context_t *flow_data_ptr; - - /* obtain the mutex */ - mutex_lock(&sep_mutex); - - /* get the pointer to context */ - flow_data_ptr = (struct sep_flow_context_t *) work; - - /* free all the current input tables in sep */ - sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process); - - /* free all the current tables output tables in SEP (if needed) */ - if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff) - sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process); - - /* check if we have additional tables to be sent to SEP only input - flag may be checked */ - if (flow_data_ptr->input_tables_flag) { - /* copy the message to the shared RAM and signal SEP */ - memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes); - - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2); - } - mutex_unlock(&sep_mutex); -} -/* - interrupt handler function -*/ -static irqreturn_t sep_inthandler(int irq, void *dev_id) -{ - irqreturn_t int_error; - unsigned long reg_val; - unsigned long flow_id; - struct sep_flow_context_t *flow_context_ptr; - struct sep_device *sep = dev_id; - - int_error = IRQ_HANDLED; - - /* read the IRR register to check if this is SEP interrupt */ - reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR); - edbg("SEP Interrupt - reg is %08lx\n", reg_val); - - /* check if this is the flow interrupt */ - if (0 /*reg_val & (0x1 << 11) */ ) { - /* read GPRO to find out the which flow is done */ - flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR); - - /* find the contex of the flow */ - flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28); - if (flow_context_ptr == NULL) - goto end_function_with_error; - - /* queue the work */ - INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler); - queue_work(sep->flow_wq, &flow_context_ptr->flow_wq); - - } else { - /* check if this is reply interrupt from SEP */ - if (reg_val & (0x1 << 13)) { - /* update the counter of reply messages */ - sep->reply_ct++; - /* wake up the waiting process */ - wake_up(&sep_event); - } else { - int_error = IRQ_NONE; - goto end_function; - } - } -end_function_with_error: - /* clear the interrupt */ - sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val); -end_function: - return int_error; -} - -#endif - - - -#if 0 - -static void sep_wait_busy(struct sep_device *sep) -{ - u32 reg; - - do { - reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR); - } while (reg); -} - -/* - PATCH for configuring the DMA to single burst instead of multi-burst -*/ -static void sep_configure_dma_burst(struct sep_device *sep) -{ -#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL - - dbg("SEP Driver:<-------- sep_configure_dma_burst start \n"); - - /* request access to registers 
from SEP */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); - - dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n"); - - sep_wait_busy(sep); - - dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n"); - - /* set the DMA burst register to single burst */ - sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL); - - /* release the sep busy */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL); - sep_wait_busy(sep); - - dbg("SEP Driver:<-------- sep_configure_dma_burst done \n"); - -} - -#endif - -/* - Function that is activated on the successful probe of the SEP device -*/ -static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int error = 0; - struct sep_device *sep; - int counter; - int size; /* size of memory for allocation */ - - edbg("Sep pci probe starting\n"); - if (sep_dev != NULL) { - dev_warn(&pdev->dev, "only one SEP supported.\n"); - return -EBUSY; - } - - /* enable the device */ - error = pci_enable_device(pdev); - if (error) { - edbg("error enabling pci device\n"); - goto end_function; - } - - /* set the pci dev pointer */ - sep_dev = &sep_instance; - sep = &sep_instance; - - edbg("sep->shared_addr = %p\n", sep->shared_addr); - /* transaction counter that coordinates the transactions between SEP - and HOST */ - sep->send_ct = 0; - /* counter for the messages from sep */ - sep->reply_ct = 0; - /* counter for the number of bytes allocated in the pool - for the current transaction */ - sep->data_pool_bytes_allocated = 0; - - /* calculate the total size for allocation */ - size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + - SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; - - /* allocate the shared area */ - if (sep_map_and_alloc_shared_area(sep, size)) { - error = -ENOMEM; - /* allocation failed */ - goto end_function_error; - } - /* now set the memory regions */ -#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1) - /* Note: this test section will need moving before it could ever - work as the registers are not yet mapped ! 
*/ - /* send the new SHARED MESSAGE AREA to the SEP */ - sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus); - - /* poll for SEP response */ - retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); - while (retval != 0xffffffff && retval != sep->shared_bus) - retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); - - /* check the return value (register) */ - if (retval != sep->shared_bus) { - error = -ENOMEM; - goto end_function_deallocate_sep_shared_area; - } -#endif - /* init the flow contextes */ - for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++) - sep->flows[counter].flow_id = SEP_FREE_FLOW_ID; - - sep->flow_wq = create_singlethread_workqueue("sepflowwq"); - if (sep->flow_wq == NULL) { - error = -ENOMEM; - edbg("sep_driver:flow queue creation failed\n"); - goto end_function_deallocate_sep_shared_area; - } - edbg("SEP Driver: create flow workqueue \n"); - sep->pdev = pci_dev_get(pdev); - - sep->reg_addr = pci_ioremap_bar(pdev, 0); - if (!sep->reg_addr) { - edbg("sep: ioremap of registers failed.\n"); - goto end_function_deallocate_sep_shared_area; - } - edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr); - - /* load the rom code */ - sep_load_rom_code(sep); - - /* set up system base address and shared memory location */ - sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev, - 2 * SEP_RAR_IO_MEM_REGION_SIZE, - &sep->rar_bus, GFP_KERNEL); - - if (!sep->rar_addr) { - edbg("SEP Driver:can't allocate rar\n"); - goto end_function_uniomap; - } - - - edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus); - edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr); - -#if !SEP_DRIVER_POLLING_MODE - - edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n"); - - /* clear ICR register */ - sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); - - /* set the IMR register - open only GPR 2 */ - sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); - - edbg("SEP Driver: about to call request_irq\n"); - /* get the interrupt line */ - error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep); - if (error) - goto end_function_free_res; - return 0; - edbg("SEP Driver: about to write IMR REG_ADDR"); - - /* set the IMR register - open only GPR 2 */ - sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); - -end_function_free_res: - dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE, - sep->rar_addr, sep->rar_bus); -#endif /* SEP_DRIVER_POLLING_MODE */ -end_function_uniomap: - iounmap(sep->reg_addr); -end_function_deallocate_sep_shared_area: - /* de-allocate shared area */ - sep_unmap_and_free_shared_area(sep, size); -end_function_error: - sep_dev = NULL; -end_function: - return error; -} - -static const struct pci_device_id sep_pci_id_tbl[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)}, - {0} -}; - -MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl); - -/* field for registering driver to PCI device */ -static struct pci_driver sep_pci_driver = { - .name = "sep_sec_driver", - .id_table = sep_pci_id_tbl, - .probe = sep_probe - /* FIXME: remove handler */ -}; - -/* major and minor device numbers */ -static dev_t sep_devno; - -/* the files operations structure of the driver */ -static struct file_operations sep_file_operations = { - .owner = THIS_MODULE, - .unlocked_ioctl = sep_ioctl, - .poll = sep_poll, - .open = sep_open, - .release = sep_release, - .mmap = sep_mmap, -}; - - -/* cdev struct of the driver */ -static struct cdev sep_cdev; - -/* - this function registers the driver to the file system -*/ -static int 
sep_register_driver_to_fs(void) -{ - int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver"); - if (ret_val) { - edbg("sep: major number allocation failed, retval is %d\n", - ret_val); - return ret_val; - } - /* init cdev */ - cdev_init(&sep_cdev, &sep_file_operations); - sep_cdev.owner = THIS_MODULE; - - /* register the driver with the kernel */ - ret_val = cdev_add(&sep_cdev, sep_devno, 1); - if (ret_val) { - edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val); - /* unregister dev numbers */ - unregister_chrdev_region(sep_devno, 1); - } - return ret_val; -} - - -/*-------------------------------------------------------------- - init function -----------------------------------------------------------------*/ -static int __init sep_init(void) -{ - int ret_val = 0; - dbg("SEP Driver:-------->Init start\n"); - /* FIXME: Probe can occur before we are ready to survive a probe */ - ret_val = pci_register_driver(&sep_pci_driver); - if (ret_val) { - edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val); - goto end_function_unregister_from_fs; - } - /* register driver to fs */ - ret_val = sep_register_driver_to_fs(); - if (ret_val) - goto end_function_unregister_pci; - goto end_function; -end_function_unregister_pci: - pci_unregister_driver(&sep_pci_driver); -end_function_unregister_from_fs: - /* unregister from fs */ - cdev_del(&sep_cdev); - /* unregister dev numbers */ - unregister_chrdev_region(sep_devno, 1); -end_function: - dbg("SEP Driver:<-------- Init end\n"); - return ret_val; -} - - -/*------------------------------------------------------------- - exit function ---------------------------------------------------------------*/ -static void __exit sep_exit(void) -{ - int size; - - dbg("SEP Driver:--------> Exit start\n"); - - /* unregister from fs */ - cdev_del(&sep_cdev); - /* unregister dev numbers */ - unregister_chrdev_region(sep_devno, 1); - /* calculate the total size for de-allocation */ - size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + - SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; - /* FIXME: We need to do this in the unload for the device */ - /* free shared area */ - if (sep_dev) { - sep_unmap_and_free_shared_area(sep_dev, size); - edbg("SEP Driver: free pages SEP SHARED AREA \n"); - iounmap((void *) sep_dev->reg_addr); - edbg("SEP Driver: iounmap \n"); - } - edbg("SEP Driver: release_mem_region \n"); - dbg("SEP Driver:<-------- Exit end\n"); -} - - -module_init(sep_init); -module_exit(sep_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h deleted file mode 100644 index 7ef16da7c4e..00000000000 --- a/drivers/staging/sep/sep_driver_api.h +++ /dev/null @@ -1,425 +0,0 @@ -/* - * - * sep_driver_api.h - Security Processor Driver api definitions - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * Copyright(c) 2009 Discretix. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * CONTACTS: - * - * Mark Allyn mark.a.allyn@intel.com - * - * CHANGES: - * - * 2009.06.26 Initial publish - * - */ - -#ifndef __SEP_DRIVER_API_H__ -#define __SEP_DRIVER_API_H__ - - - -/*---------------------------------------------------------------- - IOCTL command defines - -----------------------------------------------------------------*/ - -/* magic number 1 of the sep IOCTL command */ -#define SEP_IOC_MAGIC_NUMBER 's' - -/* sends interrupt to sep that message is ready */ -#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0) - -/* sends interrupt to sep that message is ready */ -#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1) - -/* allocate memory in data pool */ -#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2) - -/* write to pre-allocated memory in data pool */ -#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3) - -/* read from pre-allocated memory in data pool */ -#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4) - -/* create sym dma lli tables */ -#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5) - -/* create flow dma lli tables */ -#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6) - -/* free dynamic data aalocated during table creation */ -#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7) - -/* get the static pool area addresses (physical and virtual) */ -#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8) - -/* set flow id command */ -#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9) - -/* add tables to the dynamic flow */ -#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10) - -/* add flow add tables message */ -#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11) - -/* start sep command */ -#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12) - -/* init sep command */ -#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13) - -/* end transaction command */ -#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15) - -/* reallocate cache and resident */ -#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16) - -/* get the offset of the address starting from the beginnnig of the map area */ -#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17) - -/* get time address and value */ -#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19) - -/*------------------------------------------- - TYPEDEFS -----------------------------------------------*/ - -/* - init command struct -*/ -struct sep_driver_init_t { - /* start of the 1G of the host memory address that SEP can access */ - unsigned long message_addr; - - /* start address of resident */ - unsigned long message_size_in_words; - -}; - - -/* - realloc cache resident command -*/ -struct sep_driver_realloc_cache_resident_t { - /* new cache address */ - u64 new_cache_addr; - /* new resident address */ - u64 new_resident_addr; - /* new resident address */ - u64 new_shared_area_addr; - /* new base address */ - u64 new_base_addr; -}; - -struct sep_driver_alloc_t { - /* virtual address of allocated space */ - unsigned long offset; - - /* 
physical address of allocated space */ - unsigned long phys_address; - - /* number of bytes to allocate */ - unsigned long num_bytes; -}; - -/* - */ -struct sep_driver_write_t { - /* application space address */ - unsigned long app_address; - - /* address of the data pool */ - unsigned long datapool_address; - - /* number of bytes to write */ - unsigned long num_bytes; -}; - -/* - */ -struct sep_driver_read_t { - /* application space address */ - unsigned long app_address; - - /* address of the data pool */ - unsigned long datapool_address; - - /* number of bytes to read */ - unsigned long num_bytes; -}; - -/* -*/ -struct sep_driver_build_sync_table_t { - /* address value of the data in */ - unsigned long app_in_address; - - /* size of data in */ - unsigned long data_in_size; - - /* address of the data out */ - unsigned long app_out_address; - - /* the size of the block of the operation - if needed, - every table will be modulo this parameter */ - unsigned long block_size; - - /* the physical address of the first input DMA table */ - unsigned long in_table_address; - - /* number of entries in the first input DMA table */ - unsigned long in_table_num_entries; - - /* the physical address of the first output DMA table */ - unsigned long out_table_address; - - /* number of entries in the first output DMA table */ - unsigned long out_table_num_entries; - - /* data in the first input table */ - unsigned long table_data_size; - - /* distinct user/kernel layout */ - bool isKernelVirtualAddress; - -}; - -/* -*/ -struct sep_driver_build_flow_table_t { - /* flow type */ - unsigned long flow_type; - - /* flag for input output */ - unsigned long input_output_flag; - - /* address value of the data in */ - unsigned long virt_buff_data_addr; - - /* size of data in */ - unsigned long num_virtual_buffers; - - /* the physical address of the first input DMA table */ - unsigned long first_table_addr; - - /* number of entries in the first input DMA table */ - unsigned long first_table_num_entries; - - /* data in the first input table */ - unsigned long first_table_data_size; - - /* distinct user/kernel layout */ - bool isKernelVirtualAddress; -}; - - -struct sep_driver_add_flow_table_t { - /* flow id */ - unsigned long flow_id; - - /* flag for input output */ - unsigned long inputOutputFlag; - - /* address value of the data in */ - unsigned long virt_buff_data_addr; - - /* size of data in */ - unsigned long num_virtual_buffers; - - /* address of the first table */ - unsigned long first_table_addr; - - /* number of entries in the first table */ - unsigned long first_table_num_entries; - - /* data size of the first table */ - unsigned long first_table_data_size; - - /* distinct user/kernel layout */ - bool isKernelVirtualAddress; - -}; - -/* - command struct for set flow id -*/ -struct sep_driver_set_flow_id_t { - /* flow id to set */ - unsigned long flow_id; -}; - - -/* command struct for add tables message */ -struct sep_driver_add_message_t { - /* flow id to set */ - unsigned long flow_id; - - /* message size in bytes */ - unsigned long message_size_in_bytes; - - /* address of the message */ - unsigned long message_address; -}; - -/* command struct for static pool addresses */ -struct sep_driver_static_pool_addr_t { - /* physical address of the static pool */ - unsigned long physical_static_address; - - /* virtual address of the static pool */ - unsigned long virtual_static_address; -}; - -/* command struct for getiing offset of the physical address from - the start of the mapped area */ -struct 
sep_driver_get_mapped_offset_t { - /* physical address of the static pool */ - unsigned long physical_address; - - /* virtual address of the static pool */ - unsigned long offset; -}; - -/* command struct for getting time value and address */ -struct sep_driver_get_time_t { - /* physical address of stored time */ - unsigned long time_physical_address; - - /* value of the stored time */ - unsigned long time_value; -}; - - -/* - structure that represent one entry in the DMA LLI table -*/ -struct sep_lli_entry_t { - /* physical address */ - unsigned long physical_address; - - /* block size */ - unsigned long block_size; -}; - -/* - structure that reperesents data needed for lli table construction -*/ -struct sep_lli_prepare_table_data_t { - /* pointer to the memory where the first lli entry to be built */ - struct sep_lli_entry_t *lli_entry_ptr; - - /* pointer to the array of lli entries from which the table is to be built */ - struct sep_lli_entry_t *lli_array_ptr; - - /* number of elements in lli array */ - int lli_array_size; - - /* number of entries in the created table */ - int num_table_entries; - - /* number of array entries processed during table creation */ - int num_array_entries_processed; - - /* the totatl data size in the created table */ - int lli_table_total_data_size; -}; - -/* - structure that represent tone table - it is not used in code, jkust - to show what table looks like -*/ -struct sep_lli_table_t { - /* number of pages mapped in this tables. If 0 - means that the table - is not defined (used as a valid flag) */ - unsigned long num_pages; - /* - pointer to array of page pointers that represent the mapping of the - virtual buffer defined by the table to the physical memory. If this - pointer is NULL, it means that the table is not defined - (used as a valid flag) - */ - struct page **table_page_array_ptr; - - /* maximum flow entries in table */ - struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE]; -}; - - -/* - structure for keeping the mapping of the virtual buffer into physical pages -*/ -struct sep_flow_buffer_data { - /* pointer to the array of page structs pointers to the pages of the - virtual buffer */ - struct page **page_array_ptr; - - /* number of pages taken by the virtual buffer */ - unsigned long num_pages; - - /* this flag signals if this page_array is the last one among many that were - sent in one setting to SEP */ - unsigned long last_page_array_flag; -}; - -/* - struct that keeps all the data for one flow -*/ -struct sep_flow_context_t { - /* - work struct for handling the flow done interrupt in the workqueue - this structure must be in the first place, since it will be used - forcasting to the containing flow context - */ - struct work_struct flow_wq; - - /* flow id */ - unsigned long flow_id; - - /* additional input tables exists */ - unsigned long input_tables_flag; - - /* additional output tables exists */ - unsigned long output_tables_flag; - - /* data of the first input file */ - struct sep_lli_entry_t first_input_table; - - /* data of the first output table */ - struct sep_lli_entry_t first_output_table; - - /* last input table data */ - struct sep_lli_entry_t last_input_table; - - /* last output table data */ - struct sep_lli_entry_t last_output_table; - - /* first list of table */ - struct sep_lli_entry_t input_tables_in_process; - - /* output table in process (in sep) */ - struct sep_lli_entry_t output_tables_in_process; - - /* size of messages in bytes */ - unsigned long message_size_in_bytes; - - /* message */ - 
unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES]; -}; - - -#endif diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h deleted file mode 100644 index 6008fe5eca0..00000000000 --- a/drivers/staging/sep/sep_driver_config.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - * - * sep_driver_config.h - Security Processor Driver configuration - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * Copyright(c) 2009 Discretix. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * CONTACTS: - * - * Mark Allyn mark.a.allyn@intel.com - * - * CHANGES: - * - * 2009.06.26 Initial publish - * - */ - -#ifndef __SEP_DRIVER_CONFIG_H__ -#define __SEP_DRIVER_CONFIG_H__ - - -/*-------------------------------------- - DRIVER CONFIGURATION FLAGS - -------------------------------------*/ - -/* if flag is on , then the driver is running in polling and - not interrupt mode */ -#define SEP_DRIVER_POLLING_MODE 1 - -/* flag which defines if the shared area address should be - reconfiged (send to SEP anew) during init of the driver */ -#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0 - -/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */ -#define SEP_DRIVER_ARM_DEBUG_MODE 0 - -/*------------------------------------------- - INTERNAL DATA CONFIGURATION - -------------------------------------------*/ - -/* flag for the input array */ -#define SEP_DRIVER_IN_FLAG 0 - -/* flag for output array */ -#define SEP_DRIVER_OUT_FLAG 1 - -/* maximum number of entries in one LLI tables */ -#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8 - - -/*-------------------------------------------------------- - SHARED AREA memory total size is 36K - it is divided is following: - - SHARED_MESSAGE_AREA 8K } - } - STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K) - } - DATA_POOL_AREA 12K } - - SYNCHRONIC_DMA_TABLES_AREA 5K - - FLOW_DMA_TABLES_AREA 4K - - SYSTEM_MEMORY_AREA 3k - - SYSTEM_MEMORY total size is 3k - it is divided as following: - - TIME_MEMORY_AREA 8B ------------------------------------------------------------*/ - - - -/* - the maximum length of the message - the rest of the message shared - area will be dedicated to the dma lli tables -*/ -#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024) - -/* the size of the message shared area in pages */ -#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024) - -/* the size of the data pool static area in pages */ -#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024) - -/* the size of the data pool shared area size in pages */ -#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024) - -/* the size of the message shared area in pages */ -#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5) - - -/* the size of the data pool shared area size in pages */ -#define 
SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4) - -/* system data (time, caller id etc') pool */ -#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100 - - -/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and - DATA POOL areas. area must be module 4k */ -#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24) - - -/*----------------------------------------------- - offsets of the areas starting from the shared area start address -*/ - -/* message area offset */ -#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0 - -/* static pool area offset */ -#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \ - (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES) - -/* data pool area offset */ -#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \ - (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \ - SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES) - -/* synhronic dma tables area offset */ -#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \ - (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \ - SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) - -/* sep driver flow dma tables area offset */ -#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \ - (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \ - SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES) - -/* system memory offset in bytes */ -#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \ - (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \ - SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES) - -/* offset of the time area */ -#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \ - (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES) - - - -/* start physical address of the SEP registers memory in HOST */ -#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000 - -/* size of the SEP registers memory region in HOST (for now 100 registers) */ -#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000) - -/* define the number of IRQ for SEP interrupts */ -#define SEP_DIRVER_IRQ_NUM 1 - -/* maximum number of add buffers */ -#define SEP_MAX_NUM_ADD_BUFFERS 100 - -/* number of flows */ -#define SEP_DRIVER_NUM_FLOWS 4 - -/* maximum number of entries in flow table */ -#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25 - -/* offset of the num entries in the block length entry of the LLI */ -#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24 - -/* offset of the interrupt flag in the block length entry of the LLI */ -#define SEP_INT_FLAG_OFFSET_IN_BITS 31 - -/* mask for extracting data size from LLI */ -#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF - -/* mask for entries after being shifted left */ -#define SEP_NUM_ENTRIES_MASK 0x7F - -/* default flow id */ -#define SEP_FREE_FLOW_ID 0xFFFFFFFF - -/* temp flow id used during cretiong of new flow until receiving - real flow id from sep */ -#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1) - -/* maximum add buffers message length in bytes */ -#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4) - -/* maximum number of concurrent virtual buffers */ -#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100 - -/* the token that defines the start of time address */ -#define SEP_TIME_VAL_TOKEN 0x12345678 - -/* DEBUG LEVEL MASKS */ -#define SEP_DEBUG_LEVEL_BASIC 0x1 - -#define SEP_DEBUG_LEVEL_EXTENDED 0x4 - - -/* Debug helpers */ - -#define dbg(fmt, args...) \ -do {\ - if (debug & SEP_DEBUG_LEVEL_BASIC) \ - printk(KERN_DEBUG fmt, ##args); \ -} while(0); - -#define edbg(fmt, args...) 
\ -do { \ - if (debug & SEP_DEBUG_LEVEL_EXTENDED) \ - printk(KERN_DEBUG fmt, ##args); \ -} while(0); - - - -#endif diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h deleted file mode 100644 index ea6abd8a14b..00000000000 --- a/drivers/staging/sep/sep_driver_hw_defs.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - * - * sep_driver_hw_defs.h - Security Processor Driver hardware definitions - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * Copyright(c) 2009 Discretix. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * CONTACTS: - * - * Mark Allyn mark.a.allyn@intel.com - * - * CHANGES: - * - * 2009.06.26 Initial publish - * - */ - -#ifndef SEP_DRIVER_HW_DEFS__H -#define SEP_DRIVER_HW_DEFS__H - -/*--------------------------------------------------------------------------*/ -/* Abstract: HW Registers Defines. */ -/* */ -/* Note: This file was automatically created !!! */ -/* DO NOT EDIT THIS FILE !!! */ -/*--------------------------------------------------------------------------*/ - - -/* cf registers */ -#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL -#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL -#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL -#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL -#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL -#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL -#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL -#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL -#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL -#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL -#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL -#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL -#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL -#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL -#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL -#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL -#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL -#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL -#define HW_R3B_REG_ADDR 0x00C0UL -#define HW_R4B_REG_ADDR 0x0100UL -#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL -#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL -#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL -#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL -#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL -#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL -#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL -#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL -#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL -#define HW_CSA_REG_ADDR 0x0140UL -#define HW_SINB_REG_ADDR 0x0180UL -#define HW_SOUTB_REG_ADDR 0x0184UL -#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL -#define HW_PKI_STATUS_REG_ADDR 0x01C4UL -#define HW_PKI_BUSY_REG_ADDR 0x01C8UL -#define HW_PKI_A_1025_REG_ADDR 0x01CCUL -#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL -#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL -#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL -#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL -#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL -#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL -#define HW_PKI_CLR_REG_ADDR 0x01E8UL 
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL -#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL -#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL -#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL -#define HW_DES_KEY_0_REG_ADDR 0x0208UL -#define HW_DES_KEY_1_REG_ADDR 0x020CUL -#define HW_DES_KEY_2_REG_ADDR 0x0210UL -#define HW_DES_KEY_3_REG_ADDR 0x0214UL -#define HW_DES_KEY_4_REG_ADDR 0x0218UL -#define HW_DES_KEY_5_REG_ADDR 0x021CUL -#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL -#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL -#define HW_DES_IV_0_REG_ADDR 0x0228UL -#define HW_DES_IV_1_REG_ADDR 0x022CUL -#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL -#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL -#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL -#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL -#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL -#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL -#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL -#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL -#define HW_AES_KEY_0_REG_ADDR 0x0400UL -#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL -#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL -#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL -#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL -#define HW_AES_IV_0_REG_ADDR 0x0440UL -#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL -#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL -#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL -#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL -#define HW_AES_CTR1_REG_ADDR 0x0460UL -#define HW_AES_SK_REG_ADDR 0x0478UL -#define HW_AES_MAC_OK_REG_ADDR 0x0480UL -#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL -#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL -#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL -#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL -#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL -#define HW_AES_CONTROL_REG_ADDR 0x04C0UL -#define HW_HASH_H0_REG_ADDR 0x0640UL -#define HW_HASH_H1_REG_ADDR 0x0644UL -#define HW_HASH_H2_REG_ADDR 0x0648UL -#define HW_HASH_H3_REG_ADDR 0x064CUL -#define HW_HASH_H4_REG_ADDR 0x0650UL -#define HW_HASH_H5_REG_ADDR 0x0654UL -#define HW_HASH_H6_REG_ADDR 0x0658UL -#define HW_HASH_H7_REG_ADDR 0x065CUL -#define HW_HASH_H8_REG_ADDR 0x0660UL -#define HW_HASH_H9_REG_ADDR 0x0664UL -#define HW_HASH_H10_REG_ADDR 0x0668UL -#define HW_HASH_H11_REG_ADDR 0x066CUL -#define HW_HASH_H12_REG_ADDR 0x0670UL -#define HW_HASH_H13_REG_ADDR 0x0674UL -#define HW_HASH_H14_REG_ADDR 0x0678UL -#define HW_HASH_H15_REG_ADDR 0x067CUL -#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL -#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL -#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL -#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL -#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL -#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL -#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL -#define HW_HASH_PARAM_REG_ADDR 0x07DCUL -#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL -#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL -#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL -#define HW_HASH_DATA_REG_ADDR 0x07ECUL -#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL -#define HW_DRNG_VALID_REG_ADDR 0x0804UL -#define HW_DRNG_DATA_REG_ADDR 0x0808UL -#define HW_RND_SRC_EN_REG_ADDR 0x080CUL -#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL -#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL -#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL -#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL -#define HW_CLK_STATUS_REG_ADDR 0x0824UL -#define HW_CLK_ENABLE_REG_ADDR 0x0828UL -#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL -#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL -#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL -#define HW_CRYPTO_STATUS_REG_ADDR 
0x090CUL -#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL -#define HW_AES_BUSY_REG_ADDR 0x0914UL -#define HW_DES_BUSY_REG_ADDR 0x0918UL -#define HW_HASH_BUSY_REG_ADDR 0x091CUL -#define HW_CONTENT_REG_ADDR 0x0924UL -#define HW_VERSION_REG_ADDR 0x0928UL -#define HW_CONTEXT_ID_REG_ADDR 0x0930UL -#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL -#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL -#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL -#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL -#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL -#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL -#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL -#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL -#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL -#define HW_OLD_DATA_REG_ADDR 0x0C48UL -#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL -#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL -#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL -#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL -#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL -#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL -#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL -#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL -#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL -#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL -#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL -#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL -#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL -#define HW_SRAM_DATA_REG_ADDR 0x0F00UL -#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL -#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL -#define HW_HOST_IRR_REG_ADDR 0x0A00UL -#define HW_HOST_IMR_REG_ADDR 0x0A04UL -#define HW_HOST_ICR_REG_ADDR 0x0A08UL -#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL -#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL -#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL -#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL -#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL -#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL -#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL -#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL -#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL -#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL -#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL -#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL -#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL -#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL -#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL -#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL -#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL -#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL -#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL -#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL -#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL -#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL -#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL -#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL -#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL - -#endif /* ifndef HW_DEFS */ diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig index 5e2ffefb60a..d231ae27299 100644 --- a/drivers/staging/spectra/Kconfig +++ b/drivers/staging/spectra/Kconfig @@ -2,6 +2,7 @@ menuconfig SPECTRA tristate "Denali Spectra Flash Translation Layer" depends on BLOCK + depends on X86_MRST default n ---help--- Enable the FTL pseudo-filesystem used with the NAND Flash diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index d0c5c97eda3..fa21a0fd8e8 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c @@ -27,6 +27,8 @@ #include <linux/kthread.h> #include <linux/log2.h> #include <linux/init.h> +#include <linux/smp_lock.h> +#include 
<linux/slab.h> /**** Helper functions used for Div, Remainder operation on u64 ****/ @@ -113,7 +115,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type) #define GLOB_SBD_NAME "nd" #define GLOB_SBD_IRQ_NUM (29) -#define GLOB_VERSION "driver version 20091110" #define GLOB_SBD_IOCTL_GC (0x7701) #define GLOB_SBD_IOCTL_WL (0x7702) @@ -272,13 +273,6 @@ static int get_res_blk_num_os(void) return res_blks; } -static void SBD_prepare_flush(struct request_queue *q, struct request *rq) -{ - rq->cmd_type = REQ_TYPE_LINUX_BLOCK; - /* rq->timeout = 5 * HZ; */ - rq->cmd[0] = REQ_LB_OP_FLUSH; -} - /* Transfer a full request. */ static int do_transfer(struct spectra_nand_dev *tr, struct request *req) { @@ -296,8 +290,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req) IdentifyDeviceData.PagesPerBlock * res_blks_os; - if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && - req->cmd[0] == REQ_LB_OP_FLUSH) { + if (req->cmd_type & REQ_FLUSH) { if (force_flush_cache()) /* Fail to flush cache */ return -EIO; else @@ -597,11 +590,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode, return -ENOTTY; } +int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg); + unlock_kernel(); + + return ret; +} + static struct block_device_operations GLOB_SBD_ops = { .owner = THIS_MODULE, .open = GLOB_SBD_open, .release = GLOB_SBD_release, - .locked_ioctl = GLOB_SBD_ioctl, + .ioctl = GLOB_SBD_unlocked_ioctl, .getgeo = GLOB_SBD_getgeo, }; @@ -650,8 +655,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); - blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH, - SBD_prepare_flush); + blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH); dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); if (IS_ERR(dev->thread)) { diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c index 134aa5166a8..9b5218b6ada 100644 --- a/drivers/staging/spectra/flash.c +++ b/drivers/staging/spectra/flash.c @@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr, static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr, u8 cache_blk, u16 flag); static int FTL_Cache_Write(void); -static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr); static void FTL_Calculate_LRU(void); static u32 FTL_Get_Block_Index(u32 wBlockNum); @@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void); static int FTL_Replace_Block(u64 blk_addr); static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX); -static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr); - struct device_info_tag DeviceInfo; struct flash_cache_tag Cache; static struct spectra_l2_cache_info cache_l2; @@ -775,7 +772,7 @@ static void dump_cache_l2_table(void) { struct list_head *p; struct spectra_l2_cache_list *pnd; - int n, i; + int n; n = 0; list_for_each(p, &cache_l2.table.list) { @@ -1538,79 +1535,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr) } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& -* Function: FTL_Cache_Update_Block -* Inputs: pointer to buffer,page address,block address -* Outputs: PASS=0 / FAIL=1 -* Description: It updates the cache -*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ -static int FTL_Cache_Update_Block(u8 *pData, - u64 old_page_addr, u64 
blk_addr) -{ - int i, j; - u8 *buf = pData; - int wResult = PASS; - int wFoundInCache; - u64 page_addr; - u64 addr; - u64 old_blk_addr; - u16 page_offset; - - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); - - old_blk_addr = (u64)(old_page_addr >> - DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize; - page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >> - DeviceInfo.nBitsInPageDataSize); - - for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) { - page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize; - if (i != page_offset) { - wFoundInCache = FAIL; - for (j = 0; j < CACHE_ITEM_NUM; j++) { - addr = Cache.array[j].address; - addr = FTL_Get_Physical_Block_Addr(addr) + - GLOB_u64_Remainder(addr, 2); - if ((addr >= page_addr) && addr < - (page_addr + Cache.cache_item_size)) { - wFoundInCache = PASS; - buf = Cache.array[j].buf; - Cache.array[j].changed = SET; -#if CMD_DMA -#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE - int_cache[ftl_cmd_cnt].item = j; - int_cache[ftl_cmd_cnt].cache.address = - Cache.array[j].address; - int_cache[ftl_cmd_cnt].cache.changed = - Cache.array[j].changed; -#endif -#endif - break; - } - } - if (FAIL == wFoundInCache) { - if (ERR == FTL_Cache_Read_All(g_pTempBuf, - page_addr)) { - wResult = FAIL; - break; - } - buf = g_pTempBuf; - } - } else { - buf = pData; - } - - if (FAIL == FTL_Cache_Write_All(buf, - blk_addr + (page_addr - old_blk_addr))) { - wResult = FAIL; - break; - } - } - - return wResult; -} - -/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Copy_Block * Inputs: source block address * Destination block address @@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void) static int erase_l2_cache_blocks(void) { int i, ret = PASS; - u32 pblk, lblk; + u32 pblk, lblk = BAD_BLOCK; u64 addr; u32 *pbt = (u32 *)g_pBlockTable; @@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr) return ret; } -/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& -* Function: FTL_Cache_Write_Back -* Inputs: pointer to data cached in sys memory -* address of free block in flash -* Outputs: PASS=0 / FAIL=1 -* Description: writes all the pages of Cache Block to flash -* -*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ -static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr) -{ - int i, j, iErase; - u64 old_page_addr, addr, phy_addr; - u32 *pbt = (u32 *)g_pBlockTable; - u32 lba; - - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); - - old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) + - GLOB_u64_Remainder(blk_addr, 2); - - iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? 
PASS : FAIL; - - pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK); - -#if CMD_DMA - p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; - g_pBTDelta_Free += sizeof(struct BTableChangesDelta); - - p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; - p_BTableChangesDelta->BT_Index = (u32)(blk_addr >> - DeviceInfo.nBitsInBlockDataSize); - p_BTableChangesDelta->BT_Entry_Value = - pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)]; - p_BTableChangesDelta->ValidFields = 0x0C; -#endif - - if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { - g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; - FTL_Write_IN_Progress_Block_Table_Page(); - } - - for (i = 0; i < RETRY_TIMES; i++) { - if (PASS == iErase) { - phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); - if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) { - lba = BLK_FROM_ADDR(blk_addr); - MARK_BLOCK_AS_BAD(pbt[lba]); - i = RETRY_TIMES; - break; - } - } - - for (j = 0; j < CACHE_ITEM_NUM; j++) { - addr = Cache.array[j].address; - if ((addr <= blk_addr) && - ((addr + Cache.cache_item_size) > blk_addr)) - cache_block_to_write = j; - } - - phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); - if (PASS == FTL_Cache_Update_Block(pData, - old_page_addr, phy_addr)) { - cache_block_to_write = UNHIT_CACHE_ITEM; - break; - } else { - iErase = PASS; - } - } - - if (i >= RETRY_TIMES) { - if (ERR == FTL_Flash_Error_Handle(pData, - old_page_addr, blk_addr)) - return ERR; - else - return FAIL; - } - - return PASS; -} - /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Cache_Write_Page * Inputs: Pointer to buffer, page address, cache block number @@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce) return 1; } -/****************************************************************** -* Function: GLOB_FTL_Flash_Format -* Inputs: none -* Outputs: PASS -* Description: The block table stores bad block info, including MDF+ -* blocks gone bad over the ages. Therefore, if we have a -* block table in place, then use it to scan for bad blocks -* If not, then scan for MDF. -* Now, a block table will only be found if spectra was already -* being used. For a fresh flash, we'll go thru scanning for -* MDF. If spectra was being used, then there is a chance that -* the MDF has been corrupted. Spectra avoids writing to the -* first 2 bytes of the spare area to all pages in a block. This -* covers all known flash devices. However, since flash -* manufacturers have no standard of where the MDF is stored, -* this cannot guarantee that the MDF is protected for future -* devices too. The initial scanning for the block table assures -* this. It is ok even if the block table is outdated, as all -* we're looking for are bad block markers. -* Use this when mounting a file system or starting a -* new flash. 
-* -*********************************************************************/ -static int FTL_Format_Flash(u8 valid_block_table) -{ - u32 i, j; - u32 *pbt = (u32 *)g_pBlockTable; - u32 tempNode; - int ret; - -#if CMD_DMA - u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy; - if (ftl_cmd_cnt) - return FAIL; -#endif - - if (FAIL == FTL_Check_Block_Table(FAIL)) - valid_block_table = 0; - - if (valid_block_table) { - u8 switched = 1; - u32 block, k; - - k = DeviceInfo.wSpectraStartBlock; - while (switched && (k < DeviceInfo.wSpectraEndBlock)) { - switched = 0; - k++; - for (j = DeviceInfo.wSpectraStartBlock, i = 0; - j <= DeviceInfo.wSpectraEndBlock; - j++, i++) { - block = (pbt[i] & ~BAD_BLOCK) - - DeviceInfo.wSpectraStartBlock; - if (block != i) { - switched = 1; - tempNode = pbt[i]; - pbt[i] = pbt[block]; - pbt[block] = tempNode; - } - } - } - if ((k == DeviceInfo.wSpectraEndBlock) && switched) - valid_block_table = 0; - } - - if (!valid_block_table) { - memset(g_pBlockTable, 0, - DeviceInfo.wDataBlockNum * sizeof(u32)); - memset(g_pWearCounter, 0, - DeviceInfo.wDataBlockNum * sizeof(u8)); - if (DeviceInfo.MLCDevice) - memset(g_pReadCounter, 0, - DeviceInfo.wDataBlockNum * sizeof(u16)); -#if CMD_DMA - memset(g_pBTStartingCopy, 0, - DeviceInfo.wDataBlockNum * sizeof(u32)); - memset(g_pWearCounterCopy, 0, - DeviceInfo.wDataBlockNum * sizeof(u8)); - if (DeviceInfo.MLCDevice) - memset(g_pReadCounterCopy, 0, - DeviceInfo.wDataBlockNum * sizeof(u16)); -#endif - for (j = DeviceInfo.wSpectraStartBlock, i = 0; - j <= DeviceInfo.wSpectraEndBlock; - j++, i++) { - if (GLOB_LLD_Get_Bad_Block((u32)j)) - pbt[i] = (u32)(BAD_BLOCK | j); - } - } - - nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n"); - - for (j = DeviceInfo.wSpectraStartBlock, i = 0; - j <= DeviceInfo.wSpectraEndBlock; - j++, i++) { - if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) { - ret = GLOB_LLD_Erase_Block(j); - if (FAIL == ret) { - pbt[i] = (u32)(j); - MARK_BLOCK_AS_BAD(pbt[i]); - nand_dbg_print(NAND_DBG_WARN, - "NAND Program fail in %s, Line %d, " - "Function: %s, new Bad Block %d generated!\n", - __FILE__, __LINE__, __func__, (int)j); - } else { - pbt[i] = (u32)(SPARE_BLOCK | j); - } - } -#if CMD_DMA - pbtStartingCopy[i] = pbt[i]; -#endif - } - - g_wBlockTableOffset = 0; - for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock - - DeviceInfo.wSpectraStartBlock)) - && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++) - ; - if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) { - printk(KERN_ERR "All blocks bad!\n"); - return FAIL; - } else { - g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK; - if (i != BLOCK_TABLE_INDEX) { - tempNode = pbt[i]; - pbt[i] = pbt[BLOCK_TABLE_INDEX]; - pbt[BLOCK_TABLE_INDEX] = tempNode; - } - } - pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK); - -#if CMD_DMA - pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK); -#endif - - g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; - memset(g_pBTBlocks, 0xFF, - (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32)); - g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex; - FTL_Write_Block_Table(FAIL); - - for (i = 0; i < CACHE_ITEM_NUM; i++) { - Cache.array[i].address = NAND_CACHE_INIT_ADDR; - Cache.array[i].use_cnt = 0; - Cache.array[i].changed = CLEAR; - } - -#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA) - memcpy((void *)&cache_start_copy, (void *)&Cache, - sizeof(struct flash_cache_tag)); -#endif - return PASS; -} - static int force_format_nand(void) { u32 i; @@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void) return wResult; } - 
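The two error paths deleted in this file, FTL_Cache_Write_Back() above and FTL_Flash_Error_Handle() below, share the same erase-and-retry shape: optionally erase the physical block, retry the rewrite, retire the block if the erase itself fails. A condensed sketch of that pattern follows, with placeholder callbacks rather than real Spectra APIs, and a stand-in retry count; PASS/FAIL follow the driver's 0/1 convention.

/* PASS/FAIL follow the driver's convention: PASS=0, FAIL=1 */
#define SKETCH_PASS	0
#define SKETCH_FAIL	1
#define SKETCH_RETRIES	3	/* stand-in for the driver's RETRY_TIMES */

struct sketch_flash_ops {
	int (*erase_block)(unsigned long phys_blk);	/* returns PASS/FAIL */
	int (*rewrite_block)(unsigned long phys_blk);	/* returns PASS/FAIL */
	void (*mark_bad)(unsigned long phys_blk);
};

/* Retry a block rewrite, erasing first once a previous attempt has failed. */
static int sketch_rewrite_with_retry(const struct sketch_flash_ops *ops,
				     unsigned long phys_blk, int need_erase)
{
	int i;

	for (i = 0; i < SKETCH_RETRIES; i++) {
		if (need_erase && ops->erase_block(phys_blk) != SKETCH_PASS) {
			ops->mark_bad(phys_blk);	/* erase failed: retire the block */
			return SKETCH_FAIL;
		}
		if (ops->rewrite_block(phys_blk) == SKETCH_PASS)
			return SKETCH_PASS;
		need_erase = 1;				/* erase before the next attempt */
	}
	return SKETCH_FAIL;	/* caller escalates, e.g. to the error handler */
}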
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& -* Function: FTL_Flash_Error_Handle -* Inputs: Pointer to data -* Page address -* Block address -* Outputs: PASS=0 / FAIL=1 -* Description: It handles any error occured during Spectra operation -*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ -static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, - u64 blk_addr) -{ - u32 i; - int j; - u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr); - u64 phy_addr; - int wErase = FAIL; - int wResult = FAIL; - u32 *pbt = (u32 *)g_pBlockTable; - - nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", - __FILE__, __LINE__, __func__); - - if (ERR == GLOB_FTL_Garbage_Collection()) - return ERR; - - do { - for (i = DeviceInfo.wSpectraEndBlock - - DeviceInfo.wSpectraStartBlock; - i > 0; i--) { - if (IS_SPARE_BLOCK(i)) { - tmp_node = (u32)(BAD_BLOCK | - pbt[blk_node]); - pbt[blk_node] = (u32)(pbt[i] & - (~SPARE_BLOCK)); - pbt[i] = tmp_node; -#if CMD_DMA - p_BTableChangesDelta = - (struct BTableChangesDelta *) - g_pBTDelta_Free; - g_pBTDelta_Free += - sizeof(struct BTableChangesDelta); - - p_BTableChangesDelta->ftl_cmd_cnt = - ftl_cmd_cnt; - p_BTableChangesDelta->BT_Index = - blk_node; - p_BTableChangesDelta->BT_Entry_Value = - pbt[blk_node]; - p_BTableChangesDelta->ValidFields = 0x0C; - - p_BTableChangesDelta = - (struct BTableChangesDelta *) - g_pBTDelta_Free; - g_pBTDelta_Free += - sizeof(struct BTableChangesDelta); - - p_BTableChangesDelta->ftl_cmd_cnt = - ftl_cmd_cnt; - p_BTableChangesDelta->BT_Index = i; - p_BTableChangesDelta->BT_Entry_Value = pbt[i]; - p_BTableChangesDelta->ValidFields = 0x0C; -#endif - wResult = PASS; - break; - } - } - - if (FAIL == wResult) { - if (FAIL == GLOB_FTL_Garbage_Collection()) - break; - else - continue; - } - - if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { - g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; - FTL_Write_IN_Progress_Block_Table_Page(); - } - - phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); - - for (j = 0; j < RETRY_TIMES; j++) { - if (PASS == wErase) { - if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) { - MARK_BLOCK_AS_BAD(pbt[blk_node]); - break; - } - } - if (PASS == FTL_Cache_Update_Block(pData, - old_page_addr, - phy_addr)) { - wResult = PASS; - break; - } else { - wResult = FAIL; - wErase = PASS; - } - } - } while (FAIL == wResult); - - FTL_Write_Block_Table(FAIL); - - return wResult; -} - /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Get_Page_Num * Inputs: Size in bytes diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 368c30a9d5f..4af83d5318f 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, return -ENOENT; params.key_len = len; params.key = wlandev->wep_keys[key_index]; + params.seq_len = 0; callback(cookie, ¶ms); @@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev) priv->band.n_channels = ARRAY_SIZE(prism2_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); + priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.ht_cap.ht_supported = false; wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; set_wiphy_dev(wiphy, dev); diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 77d4d715a78..722c840ac63 100644 --- a/drivers/staging/zram/zram_drv.c +++ 
b/drivers/staging/zram/zram_drv.c @@ -769,6 +769,7 @@ static int __init zram_init(void) free_devices: while (dev_id) destroy_device(&devices[--dev_id]); + kfree(devices); unregister: unregister_blkdev(zram_major, "zram"); out: diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 593fc5e2d2e..5af23cc5ea9 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, { struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); + struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; int ret; /* instance init */ @@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, goto fail; } - usb_fill_int_urb(instance->rcv_urb, + if (!cmd_ep) { + dbg("cxacru_bind: no command endpoint"); + ret = -ENODEV; + goto fail; + } + + if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_INT) { + usb_fill_int_urb(instance->rcv_urb, usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done, 1); - usb_fill_int_urb(instance->snd_urb, + usb_fill_int_urb(instance->snd_urb, usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done, 4); + } else { + usb_fill_bulk_urb(instance->rcv_urb, + usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), + instance->rcv_buf, PAGE_SIZE, + cxacru_blocking_completion, &instance->rcv_done); + + usb_fill_bulk_urb(instance->snd_urb, + usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), + instance->snd_buf, PAGE_SIZE, + cxacru_blocking_completion, &instance->snd_done); + } mutex_init(&instance->cm_serialize); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 1833b3a7151..bc62fae0680 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf, } if (!buflen) { - if (intf->cur_altsetting->endpoint->extralen && + if (intf->cur_altsetting->endpoint && + intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) { dev_dbg(&intf->dev, "Seeking extra descriptors on endpoint\n"); @@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf) USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ USB_CDC_ACM_PROTO_VENDOR) +#define SAMSUNG_PCSUITE_ACM_INFO(x) \ + USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \ + USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ + USB_CDC_ACM_PROTO_VENDOR) + /* * USB driver structure. 
*/ @@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = { { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ + { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */ + { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */ + { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */ + { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */ + { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */ + { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */ + { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */ + { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ + { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ + { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ + { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ @@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = { .driver_info = NOT_A_MODEM, }, + /* control interfaces without any protocol set */ + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_PROTO_NONE) }, + /* control interfaces with various AT-command sets */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER) }, diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index fd4c36ea5e4..844683e5038 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1724,6 +1724,15 @@ free_interfaces: if (ret) goto free_interfaces; + /* if it's already configured, clear out old state first. + * getting rid of old interfaces means unbinding their drivers. + */ + if (dev->state != USB_STATE_ADDRESS) + usb_disable_device(dev, 1); /* Skip ep0 */ + + /* Get rid of pending async Set-Config requests for this device */ + cancel_async_set_config(dev); + /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the @@ -1733,20 +1742,11 @@ free_interfaces: mutex_lock(&hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { - usb_autosuspend_device(dev); mutex_unlock(&hcd->bandwidth_mutex); + usb_autosuspend_device(dev); goto free_interfaces; } - /* if it's already configured, clear out old state first. - * getting rid of old interfaces means unbinding their drivers. 
- */ - if (dev->state != USB_STATE_ADDRESS) - usb_disable_device(dev, 1); /* Skip ep0 */ - - /* Get rid of pending async Set-Config requests for this device */ - cancel_async_set_config(dev); - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); @@ -1761,8 +1761,8 @@ free_interfaces: if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - usb_autosuspend_device(dev); mutex_unlock(&hcd->bandwidth_mutex); + usb_autosuspend_device(dev); goto free_interfaces; } mutex_unlock(&hcd->bandwidth_mutex); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index e483f80822d..1160c55de7f 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str) /** * usb_string_ids_n() - allocate unused string IDs in batch - * @cdev: the device whose string descriptor IDs are being allocated + * @c: the device whose string descriptor IDs are being allocated * @n: number of string IDs to allocate * Context: single threaded during gadget setup * * Returns the first requested ID. This ID and next @n-1 IDs are now - * valid IDs. At least providind that @n is non zore because if it + * valid IDs. At least provided that @n is non-zero because if it * is, returns last requested ID which is now very useful information. * * @usb_string_ids_n() is called from bind() callbacks to allocate diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 166bf71fd34..e03058fe23c 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev) /* initialize ucd */ m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); if (m66592 == NULL) { + ret = -ENOMEM; pr_err("kzalloc error\n"); goto clean_up; } diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 70a81784275..2456ccd9965 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev) /* initialize ucd */ r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL); if (r8a66597 == NULL) { + ret = -ENOMEM; printk(KERN_ERR "kzalloc error\n"); goto clean_up; } diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 020fa5a25fd..972d5ddd1e1 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, /* mandatory */ case OID_GEN_VENDOR_DESCRIPTION: pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); - length = strlen (rndis_per_dev_params [configNr].vendorDescr); - memcpy (outbuf, - rndis_per_dev_params [configNr].vendorDescr, length); + if ( rndis_per_dev_params [configNr].vendorDescr ) { + length = strlen (rndis_per_dev_params [configNr].vendorDescr); + memcpy (outbuf, + rndis_per_dev_params [configNr].vendorDescr, length); + } else { + outbuf[0] = 0; + } retval = 0; break; @@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ -int __init rndis_init (void) +int rndis_init(void) { u8 i; diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h index c236aaa9dcd..907c3300811 100644 --- a/drivers/usb/gadget/rndis.h +++ 
b/drivers/usb/gadget/rndis.h @@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr); int rndis_state (int configNr); extern void rndis_set_host_mac (int configNr, const u8 *addr); -int __devinit rndis_init (void); +int rndis_init(void); void rndis_exit (void); #endif /* _LINUX_RNDIS_H */ diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 521ebed0118..a229744a8c7 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -12,8 +12,6 @@ * published by the Free Software Foundation. */ -#define DEBUG - #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index 2dcffdac86d..5e807f083bc 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c @@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt) break; } - if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) { + if (i == ARRAY_SIZE(uvc_formats)) { printk(KERN_INFO "Unsupported format 0x%08x.\n", fmt->fmt.pix.pixelformat); return -EINVAL; diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 335ee699fd8..ba52be47302 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat } rv = usb_add_hcd(hcd, irq, 0); - if (rv == 0) - return 0; + if (rv) + goto err_ehci; + + return 0; +err_ehci: + if (ehci->has_amcc_usb23) + iounmap(ehci->ohci_hcctrl_reg); iounmap(hcd->regs); err_ioremap: irq_dispose_mapping(irq); err_irq: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - - if (ehci->has_amcc_usb23) - iounmap(ehci->ohci_hcctrl_reg); err_rmr: usb_put_hcd(hcd); diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index d1a3dfc9a40..bdba8c5d844 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, * almost immediately. With ISP1761, this register requires a delay of * 195ns between a write and subsequent read (see section 15.1.1.3). */ + mmiowb(); ndelay(195); skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG); @@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, * almost immediately. With ISP1761, this register requires a delay of * 195ns between a write and subsequent read (see section 15.1.1.3). 
*/ + mmiowb(); ndelay(195); skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index bc3f4f42706..48e60d166ff 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci, *seg = (*seg)->next; *trb = ((*seg)->trbs); } else { - *trb = (*trb)++; + (*trb)++; } } @@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* calc actual length */ if (ep->skip) { td->urb->iso_frame_desc[idx].actual_length = 0; + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + inc_deq(xhci, ep_ring, false); + inc_deq(xhci, ep_ring, false); return finish_td(xhci, td, event_trb, event, ep, status, true); } diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index d240de097c6..801324af947 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, /* drain secondary buffer */ int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); - if (i < 0) { + if (i) { retval = -EFAULT; goto exit; } diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 2de49c8887c..bc88c79875a 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, retval = io_res; else { io_res = copy_to_user(user_buffer, buffer, dev->report_size); - if (io_res < 0) + if (io_res) retval = -EFAULT; } break; @@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, } io_res = copy_to_user((struct iowarrior_info __user *)arg, &info, sizeof(struct iowarrior_info)); - if (io_res < 0) + if (io_res) retval = -EFAULT; break; } diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 0e8888588d4..05aaac1c386 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c @@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) struct twl4030_usb_data *pdata = pdev->dev.platform_data; struct twl4030_usb *twl; int status, err; + u8 pwr; if (!pdata) { dev_dbg(&pdev->dev, "platform_data not available\n"); @@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) twl->otg.set_peripheral = twl4030_set_peripheral; twl->otg.set_suspend = twl4030_set_suspend; twl->usb_mode = pdata->usb_mode; - twl->asleep = 1; + + pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); + + twl->asleep = (pwr & PHY_PWR_PHYPWD); /* init spinlock for workqueue */ spin_lock_init(&twl->lock); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2bef4415c19..4f1744c5871 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -56,6 +56,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 
MAC Device */ @@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ @@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ @@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ - { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ - { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ - { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ - { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; @@ -222,8 +225,8 @@ static struct usb_serial_driver cp210x_device = { #define BITS_STOP_2 0x0002 /* CP210X_SET_BREAK */ -#define BREAK_ON 0x0000 -#define BREAK_OFF 0x0001 +#define BREAK_ON 0x0001 +#define BREAK_OFF 0x0000 /* CP210X_(SET_MHS|GET_MDMSTS) */ #define CONTROL_DTR 0x0001 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index eb12d9b096b..97cc87d654c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, + { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, @@ -750,6 +751,16 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, + { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { 
USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1376,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) } /* set max packet size based on descriptor */ - priv->max_packet_size = ep_desc->wMaxPacketSize; + priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize); dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); } @@ -1831,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty, if (port->port.console && port->sysrq) { for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(tty, port, *ch)) + if (!usb_serial_handle_sysrq_char(port, *ch)) tty_insert_flip_char(tty, *ch, flag); } } else { diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 6e612c52e76..15a4583775a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -110,6 +110,9 @@ /* Propox devices */ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 +/* Lenz LI-USB Computer Interface. */ +#define FTDI_LENZ_LIUSB_PID 0xD780 + /* * Xsens Technologies BV products (http://www.xsens.com). */ @@ -132,6 +135,18 @@ #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ /* + * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs + */ +#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8 +#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9 +#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA +#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB +#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC +#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD +#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE +#define FTDI_CHAMSYS_WING_PID 0xDAFF + +/* * Westrex International devices submitted by Cory Lee */ #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ @@ -989,6 +1004,12 @@ #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ /* + * Ionics PlugComputer + */ +#define IONICS_VID 0x1c0c +#define IONICS_PLUGCOMPUTER_PID 0x0102 + +/* * Dresden Elektronik Sensor Terminal Board */ #define DE_VID 0x1cf1 /* Vendor ID */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ca92f67747c..e6833e216fc 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb) tty_insert_flip_string(tty, ch, urb->actual_length); else { for (i = 0; i < urb->actual_length; i++, ch++) { - if (!usb_serial_handle_sysrq_char(tty, port, *ch)) + if (!usb_serial_handle_sysrq_char(port, *ch)) tty_insert_flip_char(tty, *ch, TTY_NORMAL); } } @@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty) EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle); #ifdef CONFIG_MAGIC_SYSRQ -int usb_serial_handle_sysrq_char(struct tty_struct *tty, - struct usb_serial_port *port, unsigned int ch) +int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { if (port->sysrq && port->port.console) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, tty); + handle_sysrq(ch); port->sysrq = 0; return 1; } @@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty, 
return 0; } #else -int usb_serial_handle_sysrq_char(struct tty_struct *tty, - struct usb_serial_port *port, unsigned int ch) +int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { return 0; } @@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial) for (i = 0; i < serial->num_ports; ++i) generic_cleanup(serial->port[i]); } +EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect); void usb_serial_generic_release(struct usb_serial *serial) { diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index dc47f986df5..a7cfc595293 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial) /* Check if we have an old version in the I2C and update if necessary */ - if (download_cur_ver != download_new_ver) { + if (download_cur_ver < download_new_ver) { dbg("%s - Update I2C dld from %d.%d to %d.%d", __func__, firmware_version->Ver_Major, @@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial) kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); - return status; + return -EINVAL; } /* Update I2C with type 0xf2 record with correct diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 585b7e66374..1c9b6e9b238 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -119,16 +119,20 @@ * by making a change here, in moschip_port_id_table, and in * moschip_id_table_combined */ -#define USB_VENDOR_ID_BANDB 0x0856 -#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 -#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 -#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 -#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 -#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 -#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 -#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 -#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 -#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 +#define USB_VENDOR_ID_BANDB 0x0856 +#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 +#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 +#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 +#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 +#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 +#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 +#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 +#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 +#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 +#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 +#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 +#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 +#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 @@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, 
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, @@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index a6b207c8491..1f00f243c26 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c @@ -25,6 +25,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ + { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9fc6ea2c681..c46911af282 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb); #define YISO_VENDOR_ID 0x0EAB #define YISO_PRODUCT_U893 0xC893 +/* + * NOVATEL WIRELESS PRODUCTS + * + * Note from Novatel Wireless: + * If your Novatel modem does not work on linux, don't + * change the option module, but check our website. If + * that does not help, contact ddeschepper@nvtl.com +*/ /* MERLIN EVDO PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_V640 0x1100 #define NOVATELWIRELESS_PRODUCT_V620 0x1110 @@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 - /* OVATION PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 -#define NOVATELWIRELESS_PRODUCT_U727 0x5010 -#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 -#define NOVATELWIRELESS_PRODUCT_MC760 0x6000 +/* + * Note from Novatel Wireless: + * All PID in the 5xxx range are currently reserved for + * auto-install CDROMs, and should not be added to this + * module. 
+ * + * #define NOVATELWIRELESS_PRODUCT_U727 0x5010 + * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 +*/ #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 - -/* FUTURE NOVATEL PRODUCTS */ -#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 -#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 -#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 -#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 -#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 -#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 -#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 -#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 +#define NOVATELWIRELESS_PRODUCT_MC780 0x6010 +#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000 +#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001 +#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006 +#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007 +#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030 +#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041 +#define NOVATELWIRELESS_PRODUCT_MC547 0x7042 +#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000 +#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 +#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 +#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 +#define NOVATELWIRELESS_PRODUCT_G1 0xA001 +#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 +#define NOVATELWIRELESS_PRODUCT_G2 0xA010 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 @@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb); #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +/* Celot products */ +#define CELOT_VENDOR_ID 0x211f +#define CELOT_PRODUCT_CT680M 0x6801 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -486,36 +513,44 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ - { 
USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) 
}, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -887,10 +922,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, - { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 6b600182227..8ae4c6cbc38 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, @@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb) if (port->port.console && port->sysrq) { for (i = 0; i < urb->actual_length; ++i) - if (!usb_serial_handle_sysrq_char(tty, port, data[i])) + if (!usb_serial_handle_sysrq_char(port, data[i])) tty_insert_flip_char(tty, data[i], tty_flag); } else { tty_insert_flip_string_fixed_flag(tty, data, tty_flag, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a871645389d..43eb9bdad42 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h 
@@ -128,6 +128,10 @@ #define CRESSI_VENDOR_ID 0x04b8 #define CRESSI_EDY_PRODUCT_ID 0x0521 +/* Zeagle dive computer interface */ +#define ZEAGLE_VENDOR_ID 0x04b8 +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 + /* Sony, USB data cable for CMD-Jxx mobile phones */ #define SONY_VENDOR_ID 0x054c #define SONY_QN3USB_PRODUCT_ID 0x0437 diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 6e82d4f54bc..e986002b384 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -15,6 +15,7 @@ #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> +#include <linux/serial_reg.h> #include <linux/uaccess.h> #define QT_OPEN_CLOSE_CHANNEL 0xca @@ -27,36 +28,11 @@ #define QT_HW_FLOW_CONTROL_MASK 0xc5 #define QT_SW_FLOW_CONTROL_MASK 0xc6 -#define MODEM_CTL_REGISTER 0x04 -#define MODEM_STATUS_REGISTER 0x06 - - -#define SERIAL_LSR_OE 0x02 -#define SERIAL_LSR_PE 0x04 -#define SERIAL_LSR_FE 0x08 -#define SERIAL_LSR_BI 0x10 - -#define SERIAL_LSR_TEMT 0x40 - -#define SERIAL_MCR_DTR 0x01 -#define SERIAL_MCR_RTS 0x02 -#define SERIAL_MCR_LOOP 0x10 - -#define SERIAL_MSR_CTS 0x10 -#define SERIAL_MSR_CD 0x80 -#define SERIAL_MSR_RI 0x40 -#define SERIAL_MSR_DSR 0x20 #define SERIAL_MSR_MASK 0xf0 -#define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS) +#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS) -#define SERIAL_8_DATA 0x03 -#define SERIAL_7_DATA 0x02 -#define SERIAL_6_DATA 0x01 -#define SERIAL_5_DATA 0x00 - -#define SERIAL_ODD_PARITY 0X08 -#define SERIAL_EVEN_PARITY 0X18 +#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR) #define MAX_BAUD_RATE 460800 @@ -70,7 +46,7 @@ #define FULLPWRBIT 0x00000080 #define NEXT_BOARD_POWER_BIT 0x00000004 -static int debug = 1; +static int debug; /* Version Information */ #define DRIVER_VERSION "v0.1" @@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = { }; struct ssu100_port_private { + spinlock_t status_lock; u8 shadowLSR; u8 shadowMSR; wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ unsigned short max_packet_size; + struct async_icount icount; }; static void ssu100_release(struct usb_serial *serial) @@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev, static inline int ssu100_setregister(struct usb_device *dev, unsigned short uart, + unsigned short reg, u16 data) { - u16 value = (data << 8) | MODEM_CTL_REGISTER; + u16 value = (data << 8) | reg; return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), QT_SET_GET_REGISTER, 0x40, value, uart, @@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set, clear &= ~set; /* 'set' takes precedence over 'clear' */ urb_value = 0; if (set & TIOCM_DTR) - urb_value |= SERIAL_MCR_DTR; + urb_value |= UART_MCR_DTR; if (set & TIOCM_RTS) - urb_value |= SERIAL_MCR_RTS; + urb_value |= UART_MCR_RTS; - result = ssu100_setregister(dev, 0, urb_value); + result = ssu100_setregister(dev, 0, UART_MCR, urb_value); if (result < 0) dbg("%s Error from MODEM_CTRL urb", __func__); @@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty, if (cflag & PARENB) { if (cflag & PARODD) - urb_value |= SERIAL_ODD_PARITY; + urb_value |= UART_LCR_PARITY; else urb_value |= SERIAL_EVEN_PARITY; } switch (cflag & CSIZE) { case CS5: - urb_value |= SERIAL_5_DATA; + urb_value |= UART_LCR_WLEN5; break; case CS6: - urb_value |= SERIAL_6_DATA; + urb_value |= UART_LCR_WLEN6; break; case CS7: - urb_value |= SERIAL_7_DATA; + urb_value |= UART_LCR_WLEN7; break; default: case CS8: - urb_value |= 
SERIAL_8_DATA; + urb_value |= UART_LCR_WLEN8; break; } @@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port) struct ssu100_port_private *priv = usb_get_serial_port_data(port); u8 *data; int result; + unsigned long flags; dbg("%s - port %d", __func__, port->number); @@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port) return result; } - priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE | - SERIAL_LSR_FE | SERIAL_LSR_BI); - - priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR | - SERIAL_MSR_RI | SERIAL_MSR_CD); + spin_lock_irqsave(&priv->status_lock, flags); + priv->shadowLSR = data[0]; + priv->shadowMSR = data[1]; + spin_unlock_irqrestore(&priv->status_lock, flags); kfree(data); @@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port, return 0; } +static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) +{ + struct ssu100_port_private *priv = usb_get_serial_port_data(port); + struct async_icount prev, cur; + unsigned long flags; + + spin_lock_irqsave(&priv->status_lock, flags); + prev = priv->icount; + spin_unlock_irqrestore(&priv->status_lock, flags); + + while (1) { + wait_event_interruptible(priv->delta_msr_wait, + ((priv->icount.rng != prev.rng) || + (priv->icount.dsr != prev.dsr) || + (priv->icount.dcd != prev.dcd) || + (priv->icount.cts != prev.cts))); + + if (signal_pending(current)) + return -ERESTARTSYS; + + spin_lock_irqsave(&priv->status_lock, flags); + cur = priv->icount; + spin_unlock_irqrestore(&priv->status_lock, flags); + + if ((prev.rng == cur.rng) && + (prev.dsr == cur.dsr) && + (prev.dcd == cur.dcd) && + (prev.cts == cur.cts)) + return -EIO; + + if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) || + (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) || + (arg & TIOCM_CD && (prev.dcd != cur.dcd)) || + (arg & TIOCM_CTS && (prev.cts != cur.cts))) + return 0; + } + return 0; +} + static int ssu100_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct ssu100_port_private *priv = usb_get_serial_port_data(port); + void __user *user_arg = (void __user *)arg; dbg("%s cmd 0x%04x", __func__, cmd); @@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file, (struct serial_struct __user *) arg); case TIOCMIWAIT: - while (priv != NULL) { - u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK; - interruptible_sleep_on(&priv->delta_msr_wait); - /* see if a signal did it */ - if (signal_pending(current)) - return -ERESTARTSYS; - else { - u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR; - if (!diff) - return -EIO; /* no change => error */ - - /* Return 0 if caller wanted to know about - these bits */ - - if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) || - ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) || - ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) || - ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS))) - return 0; - } - } + return wait_modem_info(port, arg); + + case TIOCGICOUNT: + { + struct serial_icounter_struct icount; + struct async_icount cnow = priv->icount; + memset(&icount, 0, sizeof(icount)); + icount.cts = cnow.cts; + icount.dsr = cnow.dsr; + icount.rng = cnow.rng; + icount.dcd = cnow.dcd; + icount.rx = cnow.rx; + icount.tx = cnow.tx; + icount.frame = cnow.frame; + icount.overrun = cnow.overrun; + icount.parity = cnow.parity; + icount.brk = cnow.brk; + icount.buf_overrun = cnow.buf_overrun; + if (copy_to_user(user_arg, 
&icount, sizeof(icount))) + return -EFAULT; return 0; + } default: break; @@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port) unsigned num_endpoints; int i; + unsigned long flags; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); @@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port) } /* set max packet size based on descriptor */ + spin_lock_irqsave(&priv->status_lock, flags); priv->max_packet_size = ep_desc->wMaxPacketSize; + spin_unlock_irqrestore(&priv->status_lock, flags); dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); } @@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial) return -ENOMEM; } + spin_lock_init(&priv->status_lock); init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); - ssu100_set_max_packet_size(port); return ssu100_initdevice(serial->dev); @@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file) if (!d) return -ENOMEM; - r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d); + r = ssu100_getregister(dev, 0, UART_MCR, d); if (r < 0) goto mget_out; - r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1); + r = ssu100_getregister(dev, 0, UART_MSR, d+1); if (r < 0) goto mget_out; - r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) | - (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) | - (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) | - (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) | - (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) | - (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0); + r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) | + (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) | + (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) | + (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) | + (d[1] & UART_MSR_RI ? TIOCM_RI : 0) | + (d[1] & UART_MSR_DSR ? 
TIOCM_DSR : 0); mget_out: kfree(d); @@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) if (!port->serial->disconnected) { /* Disable flow control */ if (!on && - ssu100_setregister(dev, 0, 0) < 0) + ssu100_setregister(dev, 0, UART_MCR, 0) < 0) dev_err(&port->dev, "error from flowcontrol urb\n"); /* drop RTS and DTR */ if (on) @@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) mutex_unlock(&port->serial->disc_mutex); } +static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) +{ + struct ssu100_port_private *priv = usb_get_serial_port_data(port); + unsigned long flags; + + spin_lock_irqsave(&priv->status_lock, flags); + priv->shadowMSR = msr; + spin_unlock_irqrestore(&priv->status_lock, flags); + + if (msr & UART_MSR_ANY_DELTA) { + /* update input line counters */ + if (msr & UART_MSR_DCTS) + priv->icount.cts++; + if (msr & UART_MSR_DDSR) + priv->icount.dsr++; + if (msr & UART_MSR_DDCD) + priv->icount.dcd++; + if (msr & UART_MSR_TERI) + priv->icount.rng++; + wake_up_interruptible(&priv->delta_msr_wait); + } +} + +static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, + char *tty_flag) +{ + struct ssu100_port_private *priv = usb_get_serial_port_data(port); + unsigned long flags; + + spin_lock_irqsave(&priv->status_lock, flags); + priv->shadowLSR = lsr; + spin_unlock_irqrestore(&priv->status_lock, flags); + + *tty_flag = TTY_NORMAL; + if (lsr & UART_LSR_BRK_ERROR_BITS) { + /* we always want to update icount, but we only want to + * update tty_flag for one case */ + if (lsr & UART_LSR_BI) { + priv->icount.brk++; + *tty_flag = TTY_BREAK; + usb_serial_handle_break(port); + } + if (lsr & UART_LSR_PE) { + priv->icount.parity++; + if (*tty_flag == TTY_NORMAL) + *tty_flag = TTY_PARITY; + } + if (lsr & UART_LSR_FE) { + priv->icount.frame++; + if (*tty_flag == TTY_NORMAL) + *tty_flag = TTY_FRAME; + } + if (lsr & UART_LSR_OE){ + priv->icount.overrun++; + if (*tty_flag == TTY_NORMAL) + *tty_flag = TTY_OVERRUN; + } + } + +} + static int ssu100_process_packet(struct tty_struct *tty, struct usb_serial_port *port, struct ssu100_port_private *priv, char *packet, int len) { int i; - char flag; + char flag = TTY_NORMAL; char *ch; dbg("%s - port %d", __func__, port->number); - if (len < 4) { - dbg("%s - malformed packet", __func__); - return 0; - } - - if ((packet[0] == 0x1b) && (packet[1] == 0x1b) && + if ((len >= 4) && + (packet[0] == 0x1b) && (packet[1] == 0x1b) && ((packet[2] == 0x00) || (packet[2] == 0x01))) { - if (packet[2] == 0x00) - priv->shadowLSR = packet[3] & (SERIAL_LSR_OE | - SERIAL_LSR_PE | - SERIAL_LSR_FE | - SERIAL_LSR_BI); - - if (packet[2] == 0x01) { - priv->shadowMSR = packet[3]; - wake_up_interruptible(&priv->delta_msr_wait); + if (packet[2] == 0x00) { + ssu100_update_lsr(port, packet[3], &flag); + if (flag == TTY_OVERRUN) + tty_insert_flip_char(tty, 0, TTY_OVERRUN); } + if (packet[2] == 0x01) + ssu100_update_msr(port, packet[3]); len -= 4; ch = packet + 4; @@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty, if (port->port.console && port->sysrq) { for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(tty, port, *ch)) + if (!usb_serial_handle_sysrq_char(port, *ch)) tty_insert_flip_char(tty, *ch, flag); } } else @@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb) tty_kref_put(tty); } - static struct usb_serial_driver ssu100_device = { .driver = { .owner = THIS_MODULE, @@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = { 
.tiocmset = ssu100_tiocmset, .ioctl = ssu100_ioctl, .set_termios = ssu100_set_termios, + .disconnect = usb_serial_generic_disconnect, }; static int __init ssu100_init(void) diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 2a982e62963..7a2177c79bd 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface, serial = create_serial(dev, interface, type); if (!serial) { + module_put(type->driver.owner); dev_err(&interface->dev, "%s - out of memory\n", __func__); return -ENOMEM; } @@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface, id = get_iface_id(type, interface); retval = type->probe(serial, id); - module_put(type->driver.owner); if (retval) { dbg("sub driver rejected device"); kfree(serial); + module_put(type->driver.owner); return retval; } } @@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface, if (num_bulk_in == 0 || num_bulk_out == 0) { dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); kfree(serial); + module_put(type->driver.owner); return -ENODEV; } } @@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface, dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n"); kfree(serial); + module_put(type->driver.owner); return -EIO; } } #endif if (!num_ports) { /* if this device type has a calc_num_ports function, call it */ - if (type->calc_num_ports) { - if (!try_module_get(type->driver.owner)) { - dev_err(&interface->dev, - "module get failed, exiting\n"); - kfree(serial); - return -EIO; - } + if (type->calc_num_ports) num_ports = type->calc_num_ports(serial); - module_put(type->driver.owner); - } if (!num_ports) num_ports = type->num_ports; } @@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface, /* if this device type has an attach function, call it */ if (type->attach) { - if (!try_module_get(type->driver.owner)) { - dev_err(&interface->dev, - "module get failed, exiting\n"); - goto probe_error; - } retval = type->attach(serial); - module_put(type->driver.owner); if (retval < 0) goto probe_error; serial->attached = 1; @@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface, exit: /* success */ usb_set_intfdata(interface, serial); + module_put(type->driver.owner); return 0; probe_error: usb_serial_put(serial); + module_put(type->driver.owner); return -EIO; } EXPORT_SYMBOL_GPL(usb_serial_probe); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e05557d5299..4b99117f3ec 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -323,7 +323,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev) dev->mm = NULL; WARN_ON(!list_empty(&dev->work_list)); - kthread_stop(dev->worker); + if (dev->worker) { + kthread_stop(dev->worker); + dev->worker = NULL; + } } static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index afe21e6eb54..1c2c68356ea 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -80,7 +80,10 @@ static void clcdfb_disable(struct clcd_fb *fb) /* * Disable CLCD clock source. */ - clk_disable(fb->clk); + if (fb->clk_enabled) { + fb->clk_enabled = false; + clk_disable(fb->clk); + } } static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) @@ -88,7 +91,10 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) /* * Enable the CLCD clock source. 
*/ - clk_enable(fb->clk); + if (!fb->clk_enabled) { + fb->clk_enabled = true; + clk_enable(fb->clk); + } /* * Bring up by first enabling.. diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h index f3a4e15672d..f96a471cb1a 100644 --- a/drivers/video/matrox/matroxfb_base.h +++ b/drivers/video/matrox/matroxfb_base.h @@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) { static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) /* - * memcpy_toio works for us if: + * iowrite32_rep works for us if: * (1) Copies data as 32bit quantities, not byte after byte, * (2) Performs LE ordered stores, and * (3) It copes with unaligned source (destination is guaranteed to be page * aligned and length is guaranteed to be multiple of 4). */ - memcpy_toio(va.vaddr, src, len); + iowrite32_rep(va.vaddr, src, len >> 2); #else u_int32_t __iomem* addr = va.vaddr; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4d2992aadfb..b036677df8c 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -574,16 +574,21 @@ config IT87_WDT be called it87_wdt. config HP_WATCHDOG - tristate "HP Proliant iLO 2 Hardware Watchdog Timer" + tristate "HP Proliant iLO2+ Hardware Watchdog Timer" depends on X86 help A software monitoring watchdog and NMI sourcing driver. This driver - will detect lockups and provide stack trace. Also, when an NMI - occurs this driver will make the necessary BIOS calls to log - the cause of the NMI. This is a driver that will only load on a - HP ProLiant system with a minimum of iLO2 support. - To compile this driver as a module, choose M here: the - module will be called hpwdt. + will detect lockups and provide a stack trace. This is a driver that + will only load on a HP ProLiant system with a minimum of iLO2 support. + To compile this driver as a module, choose M here: the module will be + called hpwdt. + +config HPWDT_NMI_DECODING + bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer" + depends on HP_WATCHDOG + help + When an NMI occurs this feature will make the necessary BIOS calls to + log the cause of the NMI. 
config SC1200_WDT tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog" diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index fd312fc8940..3d77116e463 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -16,38 +16,55 @@ #include <linux/device.h> #include <linux/fs.h> #include <linux/init.h> -#include <linux/interrupt.h> #include <linux/io.h> -#include <linux/irq.h> -#include <linux/nmi.h> +#include <linux/bitops.h> #include <linux/kernel.h> #include <linux/miscdevice.h> -#include <linux/mm.h> #include <linux/module.h> -#include <linux/kdebug.h> #include <linux/moduleparam.h> -#include <linux/notifier.h> #include <linux/pci.h> #include <linux/pci_ids.h> -#include <linux/reboot.h> -#include <linux/sched.h> -#include <linux/timer.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/watchdog.h> +#ifdef CONFIG_HPWDT_NMI_DECODING #include <linux/dmi.h> -#include <linux/efi.h> -#include <linux/string.h> -#include <linux/bootmem.h> -#include <asm/desc.h> +#include <linux/spinlock.h> +#include <linux/nmi.h> +#include <linux/kdebug.h> +#include <linux/notifier.h> #include <asm/cacheflush.h> +#endif /* CONFIG_HPWDT_NMI_DECODING */ + +#define HPWDT_VERSION "1.2.0" +#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) +#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000) +#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535) +#define DEFAULT_MARGIN 30 + +static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ +static unsigned int reload; /* the computed soft_margin */ +static int nowayout = WATCHDOG_NOWAYOUT; +static char expect_release; +static unsigned long hpwdt_is_open; + +static void __iomem *pci_mem_addr; /* the PCI-memory address */ +static unsigned long __iomem *hpwdt_timer_reg; +static unsigned long __iomem *hpwdt_timer_con; +static struct pci_device_id hpwdt_devices[] = { + { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */ + { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */ + {0}, /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, hpwdt_devices); + +#ifdef CONFIG_HPWDT_NMI_DECODING #define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ #define CRU_BIOS_SIGNATURE_VALUE 0x55524324 #define PCI_BIOS32_PARAGRAPH_LEN 16 #define PCI_ROM_BASE1 0x000F0000 #define ROM_SIZE 0x10000 -#define HPWDT_VERSION "1.1.1" struct bios32_service_dir { u32 signature; @@ -112,37 +129,17 @@ struct cmn_registers { u32 reflags; } __attribute__((packed)); -#define DEFAULT_MARGIN 30 -static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ -static unsigned int reload; /* the computed soft_margin */ -static int nowayout = WATCHDOG_NOWAYOUT; -static char expect_release; -static unsigned long hpwdt_is_open; +static unsigned int hpwdt_nmi_decoding; static unsigned int allow_kdump; -static unsigned int hpwdt_nmi_sourcing; static unsigned int priority; /* hpwdt at end of die_notify list */ - -static void __iomem *pci_mem_addr; /* the PCI-memory address */ -static unsigned long __iomem *hpwdt_timer_reg; -static unsigned long __iomem *hpwdt_timer_con; - static DEFINE_SPINLOCK(rom_lock); - static void *cru_rom_addr; - static struct cmn_registers cmn_regs; -static struct pci_device_id hpwdt_devices[] = { - { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, - { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, - {0}, /* terminate list */ -}; -MODULE_DEVICE_TABLE(pci, hpwdt_devices); - extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs, unsigned long *pRomEntry); -#ifndef CONFIG_X86_64 +#ifdef CONFIG_X86_32 /* --32 Bit 
Bios------------------------------------------------------------ */ #define HPWDT_ARCH 32 @@ -331,8 +328,9 @@ static int __devinit detect_cru_service(void) iounmap(p); return rc; } - -#else +/* ------------------------------------------------------------------------- */ +#endif /* CONFIG_X86_32 */ +#ifdef CONFIG_X86_64 /* --64 Bit Bios------------------------------------------------------------ */ #define HPWDT_ARCH 64 @@ -410,17 +408,16 @@ static int __devinit detect_cru_service(void) /* if cru_rom_addr has been set then we found a CRU service */ return ((cru_rom_addr != NULL) ? 0 : -ENODEV); } - /* ------------------------------------------------------------------------- */ - -#endif +#endif /* CONFIG_X86_64 */ +#endif /* CONFIG_HPWDT_NMI_DECODING */ /* * Watchdog operations */ static void hpwdt_start(void) { - reload = (soft_margin * 1000) / 128; + reload = SECS_TO_TICKS(soft_margin); iowrite16(reload, hpwdt_timer_reg); iowrite16(0x85, hpwdt_timer_con); } @@ -441,8 +438,7 @@ static void hpwdt_ping(void) static int hpwdt_change_timer(int new_margin) { - /* Arbitrary, can't find the card's limits */ - if (new_margin < 5 || new_margin > 600) { + if (new_margin < 1 || new_margin > HPWDT_MAX_TIMER) { printk(KERN_WARNING "hpwdt: New value passed in is invalid: %d seconds.\n", new_margin); @@ -453,11 +449,17 @@ static int hpwdt_change_timer(int new_margin) printk(KERN_DEBUG "hpwdt: New timer passed in is %d seconds.\n", new_margin); - reload = (soft_margin * 1000) / 128; + reload = SECS_TO_TICKS(soft_margin); return 0; } +static int hpwdt_time_left(void) +{ + return TICKS_TO_SECS(ioread16(hpwdt_timer_reg)); +} + +#ifdef CONFIG_HPWDT_NMI_DECODING /* * NMI Handler */ @@ -468,26 +470,29 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, static int die_nmi_called; if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) - return NOTIFY_OK; - - if (hpwdt_nmi_sourcing) { - spin_lock_irqsave(&rom_lock, rom_pl); - if (!die_nmi_called) - asminline_call(&cmn_regs, cru_rom_addr); - die_nmi_called = 1; - spin_unlock_irqrestore(&rom_lock, rom_pl); - if (cmn_regs.u1.ral == 0) { - printk(KERN_WARNING "hpwdt: An NMI occurred, " - "but unable to determine source.\n"); - } else { - if (allow_kdump) - hpwdt_stop(); - panic("An NMI occurred, please see the Integrated " - "Management Log for details.\n"); - } + goto out; + + if (!hpwdt_nmi_decoding) + goto out; + + spin_lock_irqsave(&rom_lock, rom_pl); + if (!die_nmi_called) + asminline_call(&cmn_regs, cru_rom_addr); + die_nmi_called = 1; + spin_unlock_irqrestore(&rom_lock, rom_pl); + if (cmn_regs.u1.ral == 0) { + printk(KERN_WARNING "hpwdt: An NMI occurred, " + "but unable to determine source.\n"); + } else { + if (allow_kdump) + hpwdt_stop(); + panic("An NMI occurred, please see the Integrated " + "Management Log for details.\n"); } +out: return NOTIFY_OK; } +#endif /* CONFIG_HPWDT_NMI_DECODING */ /* * /dev/watchdog handling @@ -557,7 +562,7 @@ static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, - .identity = "HP iLO2 HW Watchdog Timer", + .identity = "HP iLO2+ HW Watchdog Timer", }; static long hpwdt_ioctl(struct file *file, unsigned int cmd, @@ -599,6 +604,10 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_GETTIMEOUT: ret = put_user(soft_margin, p); break; + + case WDIOC_GETTIMELEFT: + ret = put_user(hpwdt_time_left(), p); + break; } return ret; } @@ -621,80 +630,45 @@ static struct miscdevice hpwdt_miscdev = { .fops = &hpwdt_fops, }; +#ifdef 
CONFIG_HPWDT_NMI_DECODING static struct notifier_block die_notifier = { .notifier_call = hpwdt_pretimeout, .priority = 0, }; +#endif /* CONFIG_HPWDT_NMI_DECODING */ /* * Init & Exit */ +#ifdef CONFIG_HPWDT_NMI_DECODING #ifdef ARCH_HAS_NMI_WATCHDOG -static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) +static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev) { /* * If nmi_watchdog is turned off then we can turn on - * our nmi sourcing capability. + * our nmi decoding capability. */ if (!nmi_watchdog_active()) - hpwdt_nmi_sourcing = 1; + hpwdt_nmi_decoding = 1; else - dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this " + dev_warn(&dev->dev, "NMI decoding is disabled. To enable this " "functionality you must reboot with nmi_watchdog=0 " "and load the hpwdt driver with priority=1.\n"); } #else -static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) +static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev) { - dev_warn(&dev->dev, "NMI sourcing is disabled. " + dev_warn(&dev->dev, "NMI decoding is disabled. " "Your kernel does not support a NMI Watchdog.\n"); } -#endif +#endif /* ARCH_HAS_NMI_WATCHDOG */ -static int __devinit hpwdt_init_one(struct pci_dev *dev, - const struct pci_device_id *ent) +static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) { int retval; /* - * Check if we can do NMI sourcing or not - */ - hpwdt_check_nmi_sourcing(dev); - - /* - * First let's find out if we are on an iLO2 server. We will - * not run on a legacy ASM box. - * So we only support the G5 ProLiant servers and higher. - */ - if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) { - dev_warn(&dev->dev, - "This server does not have an iLO2 ASIC.\n"); - return -ENODEV; - } - - if (pci_enable_device(dev)) { - dev_warn(&dev->dev, - "Not possible to enable PCI Device: 0x%x:0x%x.\n", - ent->vendor, ent->device); - return -ENODEV; - } - - pci_mem_addr = pci_iomap(dev, 1, 0x80); - if (!pci_mem_addr) { - dev_warn(&dev->dev, - "Unable to detect the iLO2 server memory.\n"); - retval = -ENOMEM; - goto error_pci_iomap; - } - hpwdt_timer_reg = pci_mem_addr + 0x70; - hpwdt_timer_con = pci_mem_addr + 0x72; - - /* Make sure that we have a valid soft_margin */ - if (hpwdt_change_timer(soft_margin)) - hpwdt_change_timer(DEFAULT_MARGIN); - - /* * We need to map the ROM to get the CRU service. * For 32 bit Operating Systems we need to go through the 32 Bit * BIOS Service Directory @@ -705,7 +679,7 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, dev_warn(&dev->dev, "Unable to detect the %d Bit CRU Service.\n", HPWDT_ARCH); - goto error_get_cru; + return retval; } /* @@ -728,9 +702,87 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, dev_warn(&dev->dev, "Unable to register a die notifier (err=%d).\n", retval); - goto error_die_notifier; + if (cru_rom_addr) + iounmap(cru_rom_addr); } + dev_info(&dev->dev, + "HP Watchdog Timer Driver: NMI decoding initialized" + ", allow kernel dump: %s (default = 0/OFF)" + ", priority: %s (default = 0/LAST).\n", + (allow_kdump == 0) ? "OFF" : "ON", + (priority == 0) ? 
"LAST" : "FIRST"); + return 0; +} + +static void __devexit hpwdt_exit_nmi_decoding(void) +{ + unregister_die_notifier(&die_notifier); + if (cru_rom_addr) + iounmap(cru_rom_addr); +} +#else /* !CONFIG_HPWDT_NMI_DECODING */ +static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev) +{ +} + +static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) +{ + return 0; +} + +static void __devexit hpwdt_exit_nmi_decoding(void) +{ +} +#endif /* CONFIG_HPWDT_NMI_DECODING */ + +static int __devinit hpwdt_init_one(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + int retval; + + /* + * Check if we can do NMI decoding or not + */ + hpwdt_check_nmi_decoding(dev); + + /* + * First let's find out if we are on an iLO2+ server. We will + * not run on a legacy ASM box. + * So we only support the G5 ProLiant servers and higher. + */ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) { + dev_warn(&dev->dev, + "This server does not have an iLO2+ ASIC.\n"); + return -ENODEV; + } + + if (pci_enable_device(dev)) { + dev_warn(&dev->dev, + "Not possible to enable PCI Device: 0x%x:0x%x.\n", + ent->vendor, ent->device); + return -ENODEV; + } + + pci_mem_addr = pci_iomap(dev, 1, 0x80); + if (!pci_mem_addr) { + dev_warn(&dev->dev, + "Unable to detect the iLO2+ server memory.\n"); + retval = -ENOMEM; + goto error_pci_iomap; + } + hpwdt_timer_reg = pci_mem_addr + 0x70; + hpwdt_timer_con = pci_mem_addr + 0x72; + + /* Make sure that we have a valid soft_margin */ + if (hpwdt_change_timer(soft_margin)) + hpwdt_change_timer(DEFAULT_MARGIN); + + /* Initialize NMI Decoding functionality */ + retval = hpwdt_init_nmi_decoding(dev); + if (retval != 0) + goto error_init_nmi_decoding; + retval = misc_register(&hpwdt_miscdev); if (retval < 0) { dev_warn(&dev->dev, @@ -739,23 +791,14 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, goto error_misc_register; } - printk(KERN_INFO - "hp Watchdog Timer Driver: %s" - ", timer margin: %d seconds (nowayout=%d)" - ", allow kernel dump: %s (default = 0/OFF)" - ", priority: %s (default = 0/LAST).\n", - HPWDT_VERSION, soft_margin, nowayout, - (allow_kdump == 0) ? "OFF" : "ON", - (priority == 0) ? 
"LAST" : "FIRST"); - + dev_info(&dev->dev, "HP Watchdog Timer Driver: %s" + ", timer margin: %d seconds (nowayout=%d).\n", + HPWDT_VERSION, soft_margin, nowayout); return 0; error_misc_register: - unregister_die_notifier(&die_notifier); -error_die_notifier: - if (cru_rom_addr) - iounmap(cru_rom_addr); -error_get_cru: + hpwdt_exit_nmi_decoding(); +error_init_nmi_decoding: pci_iounmap(dev, pci_mem_addr); error_pci_iomap: pci_disable_device(dev); @@ -768,10 +811,7 @@ static void __devexit hpwdt_exit(struct pci_dev *dev) hpwdt_stop(); misc_deregister(&hpwdt_miscdev); - unregister_die_notifier(&die_notifier); - - if (cru_rom_addr) - iounmap(cru_rom_addr); + hpwdt_exit_nmi_decoding(); pci_iounmap(dev, pci_mem_addr); pci_disable_device(dev); } @@ -802,16 +842,18 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(soft_margin, int, 0); MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds"); -module_param(allow_kdump, int, 0); -MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); - module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +#ifdef CONFIG_HPWDT_NMI_DECODING +module_param(allow_kdump, int, 0); +MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); + module_param(priority, int, 0); MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last" " (default = 0/Last)\n"); +#endif /* !CONFIG_HPWDT_NMI_DECODING */ module_init(hpwdt_init); module_exit(hpwdt_cleanup); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 72f91bff29c..13365ba3521 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu) #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_chip xen_dynamic_chip; +static struct irq_chip xen_percpu_chip; /* Constructor for packed IRQ information. 
*/ static struct irq_info mk_unbound_info(void) @@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) irq = find_unbound_irq(); set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "event"); + handle_edge_irq, "event"); evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_evtchn_info(evtchn); @@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) if (irq < 0) goto out; - set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "ipi"); + set_irq_chip_and_handler_name(irq, &xen_percpu_chip, + handle_percpu_irq, "ipi"); bind_ipi.vcpu = cpu; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, @@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) irq = find_unbound_irq(); - set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "virq"); + set_irq_chip_and_handler_name(irq, &xen_percpu_chip, + handle_percpu_irq, "virq"); evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_virq_info(evtchn, virq); @@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .retrigger = retrigger_dynirq, }; +static struct irq_chip xen_percpu_chip __read_mostly = { + .name = "xen-percpu", + + .disable = disable_dynirq, + .mask = disable_dynirq, + .unmask = enable_dynirq, + + .ack = ack_dynirq, +}; + int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 1799bd89031..ef9c7db5207 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec, goto again; if (sysrq_key != '\0') - handle_sysrq(sysrq_key, NULL); + handle_sysrq(sysrq_key); } static struct xenbus_watch sysrq_watch = {
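The bind_evtchn_to_irq() hunk above moves plain event channels from handle_level_irq to handle_edge_irq (and IPIs/VIRQs to the new xen_percpu_chip with handle_percpu_irq) because an event channel behaves like a latched, edge-style source: the pending bit has to be acknowledged before the handler runs, otherwise an event raised while the handler is busy can be cleared together with the one being serviced. A deliberately simplified userspace model of that ordering follows; it is not the genirq flow-handler code itself:

#include <stdbool.h>
#include <stdio.h>

static bool pending;
static int handled;

/* the source raises one more event while the first one is being serviced */
static void handler(void)
{
	handled++;
	if (handled == 1)
		pending = true;
}

static int run(bool ack_before_handling)
{
	pending = true;			/* first event latched */
	handled = 0;

	if (ack_before_handling) {	/* edge-style: ack, handle, re-check */
		while (pending) {
			pending = false;
			handler();
		}
	} else {			/* handle first, ack afterwards */
		handler();
		pending = false;	/* also wipes the event latched above */
	}
	return handled;
}

int main(void)
{
	printf("ack before handling: %d of 2 events serviced\n", run(true));
	printf("ack after handling:  %d of 2 events serviced\n", run(false));
	return 0;
}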