Diffstat (limited to 'drivers/tty/hvc')
-rw-r--r--  drivers/tty/hvc/Kconfig         |  124
-rw-r--r--  drivers/tty/hvc/Makefile        |   14
-rw-r--r--  drivers/tty/hvc/hvc_beat.c      |  134
-rw-r--r--  drivers/tty/hvc/hvc_bfin_jtag.c |  105
-rw-r--r--  drivers/tty/hvc/hvc_console.c   | 1022
-rw-r--r--  drivers/tty/hvc/hvc_console.h   |  124
-rw-r--r--  drivers/tty/hvc/hvc_dcc.c       |   89
-rw-r--r--  drivers/tty/hvc/hvc_irq.c       |   49
-rw-r--r--  drivers/tty/hvc/hvc_iucv.c      | 1461
-rw-r--r--  drivers/tty/hvc/hvc_opal.c      |  439
-rw-r--r--  drivers/tty/hvc/hvc_rtas.c      |  123
-rw-r--r--  drivers/tty/hvc/hvc_tile.c      |  205
-rw-r--r--  drivers/tty/hvc/hvc_udbg.c      |   95
-rw-r--r--  drivers/tty/hvc/hvc_vio.c       |  499
-rw-r--r--  drivers/tty/hvc/hvc_xen.c       |  658
-rw-r--r--  drivers/tty/hvc/hvcs.c          | 1616
-rw-r--r--  drivers/tty/hvc/hvsi.c          | 1221
-rw-r--r--  drivers/tty/hvc/hvsi_lib.c      |  424
18 files changed, 8402 insertions, 0 deletions
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig new file mode 100644 index 00000000000..8902f9b4df7 --- /dev/null +++ b/drivers/tty/hvc/Kconfig @@ -0,0 +1,124 @@ +if TTY + +config HVC_DRIVER +	bool +	help +	  Generic "hypervisor virtual console" infrastructure for various +	  hypervisors (pSeries, iSeries, Xen, lguest). +	  It will automatically be selected if one of the back-end console drivers +	  is selected. + +config HVC_IRQ +	bool + +config HVC_CONSOLE +	bool "pSeries Hypervisor Virtual Console support" +	depends on PPC_PSERIES +	select HVC_DRIVER +	select HVC_IRQ +	help +	  pSeries machines when partitioned support a hypervisor virtual +	  console. This driver allows each pSeries partition to have a console +	  which is accessed via the HMC. + +config HVC_OLD_HVSI +	bool "Old driver for pSeries serial port (/dev/hvsi*)" +	depends on HVC_CONSOLE +	default n + +config HVC_OPAL +	bool "OPAL Console support" +	depends on PPC_POWERNV +	select HVC_DRIVER +	select HVC_IRQ +	default y +	help +	  PowerNV machines running under OPAL need that driver to get a console + +config HVC_RTAS +	bool "IBM RTAS Console support" +	depends on PPC_RTAS +	select HVC_DRIVER +	help +	  IBM Console device driver which makes use of RTAS + +config HVC_BEAT +	bool "Toshiba's Beat Hypervisor Console support" +	depends on PPC_CELLEB +	select HVC_DRIVER +	help +	  Toshiba's Cell Reference Set Beat Console device driver + +config HVC_IUCV +	bool "z/VM IUCV Hypervisor console support (VM only)" +	depends on S390 +	select HVC_DRIVER +	select IUCV +	default y +	help +	  This driver provides a Hypervisor console (HVC) back-end to access +	  a Linux (console) terminal via a z/VM IUCV communication path. + +config HVC_XEN +	bool "Xen Hypervisor Console support" +	depends on XEN +	select HVC_DRIVER +	select HVC_IRQ +	default y +	help +	  Xen virtual console device driver + +config HVC_XEN_FRONTEND +	bool "Xen Hypervisor Multiple Consoles support" +	depends on HVC_XEN +	select XEN_XENBUS_FRONTEND +	default y +	help +	  Xen driver for secondary virtual consoles + +config HVC_UDBG +       bool "udbg based fake hypervisor console" +       depends on PPC +       select HVC_DRIVER +       default n +       help +         This is meant to be used during HW bring up or debugging when +	 no other console mechanism exist but udbg, to get you a quick +	 console for userspace. Do NOT enable in production kernels.  + +config HVC_DCC +       bool "ARM JTAG DCC console" +       depends on ARM +       select HVC_DRIVER +       help +         This console uses the JTAG DCC on ARM to create a console under the HVC +	 driver. This console is used through a JTAG only on ARM. If you don't have +	 a JTAG then you probably don't want this option. + +config HVC_BFIN_JTAG +	bool "Blackfin JTAG console" +	depends on BLACKFIN +	select HVC_DRIVER +	help +	 This console uses the Blackfin JTAG to create a console under the +	 the HVC driver.  If you don't have JTAG, then you probably don't +	 want this option. + +config HVCS +	tristate "IBM Hypervisor Virtual Console Server support" +	depends on PPC_PSERIES && HVC_CONSOLE +	help +	  Partitionable IBM Power5 ppc64 machines allow hosting of +	  firmware virtual consoles from one Linux partition by +	  another Linux partition.  This driver allows console data +	  from Linux partitions to be accessed through TTY device +	  interfaces in the device tree of a Linux partition running +	  this driver. + +	  To compile this driver as a module, choose M here: the +	  module will be called hvcs.  
Additionally, this module +	  will depend on arch specific APIs exported from hvcserver.ko +	  which will also be compiled when this driver is built as a +	  module. + +endif # TTY diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile new file mode 100644 index 00000000000..4ca3723b0a3 --- /dev/null +++ b/drivers/tty/hvc/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi_lib.o +obj-$(CONFIG_HVC_OPAL)		+= hvc_opal.o hvsi_lib.o +obj-$(CONFIG_HVC_OLD_HVSI)	+= hvsi.o +obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o +obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o +obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o +obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o +obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o +obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o +obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o +obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o +obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o +obj-$(CONFIG_HVC_BFIN_JTAG)	+= hvc_bfin_jtag.o +obj-$(CONFIG_HVCS)		+= hvcs.o diff --git a/drivers/tty/hvc/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c new file mode 100644 index 00000000000..1560d235449 --- /dev/null +++ b/drivers/tty/hvc/hvc_beat.c @@ -0,0 +1,134 @@ +/* + * Beat hypervisor console driver + * + * (C) Copyright 2006 TOSHIBA CORPORATION + * + * This code is based on drivers/char/hvc_rtas.c: + * (C) Copyright IBM Corporation 2001-2005 + * (C) Copyright Red Hat, Inc. 2005 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/string.h> +#include <linux/console.h> +#include <asm/prom.h> +#include <asm/hvconsole.h> +#include <asm/firmware.h> + +#include "hvc_console.h" + +extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *); +extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t); + +struct hvc_struct *hvc_beat_dev = NULL; + +/* bug: only one queue is available regardless of vtermno */ +static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt) +{ +	static unsigned char q[sizeof(unsigned long) * 2] +		__attribute__((aligned(sizeof(unsigned long)))); +	static int qlen = 0; +	u64 got; + +again: +	if (qlen) { +		if (qlen > cnt) { +			memcpy(buf, q, cnt); +			qlen -= cnt; +			memmove(q + cnt, q, qlen); +			return cnt; +		} else {	/* qlen <= cnt */ +			int	r; + +			memcpy(buf, q, qlen); +			r = qlen; +			qlen = 0; +			return r; +		} +	} +	if (beat_get_term_char(vtermno, &got, +		((u64 *)q), ((u64 *)q) + 1) == 0) { +		qlen = got; +		goto again; +	} +	return 0; +} + +static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt) +{ +	unsigned long kb[2]; +	int rest, nlen; + +	for (rest = cnt; rest > 0; rest -= nlen) { +		nlen = (rest > 16) ? 
16 : rest; +		memcpy(kb, buf, nlen); +		beat_put_term_char(vtermno, nlen, kb[0], kb[1]); +		buf += nlen; +	} +	return cnt; +} + +static const struct hv_ops hvc_beat_get_put_ops = { +	.get_chars = hvc_beat_get_chars, +	.put_chars = hvc_beat_put_chars, +}; + +static int hvc_beat_useit = 1; + +static int hvc_beat_config(char *p) +{ +	hvc_beat_useit = simple_strtoul(p, NULL, 0); +	return 0; +} + +static int __init hvc_beat_console_init(void) +{ +	if (hvc_beat_useit && of_machine_is_compatible("Beat")) { +		hvc_instantiate(0, 0, &hvc_beat_get_put_ops); +	} +	return 0; +} + +/* temp */ +static int __init hvc_beat_init(void) +{ +	struct hvc_struct *hp; + +	if (!firmware_has_feature(FW_FEATURE_BEAT)) +		return -ENODEV; + +	hp = hvc_alloc(0, 0, &hvc_beat_get_put_ops, 16); +	if (IS_ERR(hp)) +		return PTR_ERR(hp); +	hvc_beat_dev = hp; +	return 0; +} + +static void __exit hvc_beat_exit(void) +{ +	if (hvc_beat_dev) +		hvc_remove(hvc_beat_dev); +} + +module_init(hvc_beat_init); +module_exit(hvc_beat_exit); + +__setup("hvc_beat=", hvc_beat_config); + +console_initcall(hvc_beat_console_init); diff --git a/drivers/tty/hvc/hvc_bfin_jtag.c b/drivers/tty/hvc/hvc_bfin_jtag.c new file mode 100644 index 00000000000..31d6cc6a77a --- /dev/null +++ b/drivers/tty/hvc/hvc_bfin_jtag.c @@ -0,0 +1,105 @@ +/* + * Console via Blackfin JTAG Communication + * + * Copyright 2008-2011 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/types.h> + +#include "hvc_console.h" + +/* See the Debug/Emulation chapter in the HRM */ +#define EMUDOF   0x00000001	/* EMUDAT_OUT full & valid */ +#define EMUDIF   0x00000002	/* EMUDAT_IN full & valid */ +#define EMUDOOVF 0x00000004	/* EMUDAT_OUT overflow */ +#define EMUDIOVF 0x00000008	/* EMUDAT_IN overflow */ + +/* Helper functions to glue the register API to simple C operations */ +static inline uint32_t bfin_write_emudat(uint32_t emudat) +{ +	__asm__ __volatile__("emudat = %0;" : : "d"(emudat)); +	return emudat; +} + +static inline uint32_t bfin_read_emudat(void) +{ +	uint32_t emudat; +	__asm__ __volatile__("%0 = emudat;" : "=d"(emudat)); +	return emudat; +} + +/* Send data to the host */ +static int hvc_bfin_put_chars(uint32_t vt, const char *buf, int count) +{ +	static uint32_t outbound_len; +	uint32_t emudat; +	int ret; + +	if (bfin_read_DBGSTAT() & EMUDOF) +		return 0; + +	if (!outbound_len) { +		outbound_len = count; +		bfin_write_emudat(outbound_len); +		return 0; +	} + +	ret = min(outbound_len, (uint32_t)4); +	memcpy(&emudat, buf, ret); +	bfin_write_emudat(emudat); +	outbound_len -= ret; + +	return ret; +} + +/* Receive data from the host */ +static int hvc_bfin_get_chars(uint32_t vt, char *buf, int count) +{ +	static uint32_t inbound_len; +	uint32_t emudat; +	int ret; + +	if (!(bfin_read_DBGSTAT() & EMUDIF)) +		return 0; +	emudat = bfin_read_emudat(); + +	if (!inbound_len) { +		inbound_len = emudat; +		return 0; +	} + +	ret = min(inbound_len, (uint32_t)4); +	memcpy(buf, &emudat, ret); +	inbound_len -= ret; + +	return ret; +} + +/* Glue the HVC layers to the Blackfin layers */ +static const struct hv_ops hvc_bfin_get_put_ops = { +	.get_chars = hvc_bfin_get_chars, +	.put_chars = hvc_bfin_put_chars, +}; + +static int __init hvc_bfin_console_init(void) +{ +	hvc_instantiate(0, 0, &hvc_bfin_get_put_ops); +	return 0; +} +console_initcall(hvc_bfin_console_init); + +static 
int __init hvc_bfin_init(void) +{ +	hvc_alloc(0, 0, &hvc_bfin_get_put_ops, 128); +	return 0; +} +device_initcall(hvc_bfin_init); diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c new file mode 100644 index 00000000000..4fcec1d793a --- /dev/null +++ b/drivers/tty/hvc/hvc_console.c @@ -0,0 +1,1022 @@ +/* + * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM + * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM + * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. + * Copyright (C) 2004 IBM Corporation + * + * Additional Author(s): + *  Ryan S. Arnold <rsa@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + *  + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + *  + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +#include <linux/console.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/kbd_kern.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/major.h> +#include <linux/atomic.h> +#include <linux/sysrq.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/freezer.h> +#include <linux/slab.h> +#include <linux/serial_core.h> + +#include <asm/uaccess.h> + +#include "hvc_console.h" + +#define HVC_MAJOR	229 +#define HVC_MINOR	0 + +/* + * Wait this long per iteration while trying to push buffered data to the + * hypervisor before allowing the tty to complete a close operation. + */ +#define HVC_CLOSE_WAIT (HZ/100) /* 1/10 of a second */ + +/* + * These sizes are most efficient for vio, because they are the + * native transfer size. We could make them selectable in the + * future to better deal with backends that want other buffer sizes. + */ +#define N_OUTBUF	16 +#define N_INBUF		16 + +#define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) + +static struct tty_driver *hvc_driver; +static struct task_struct *hvc_task; + +/* Picks up late kicks after list walk but before schedule() */ +static int hvc_kicked; + +/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */ +static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1); + +static int hvc_init(void); + +#ifdef CONFIG_MAGIC_SYSRQ +static int sysrq_pressed; +#endif + +/* dynamic list of hvc_struct instances */ +static LIST_HEAD(hvc_structs); + +/* + * Protect the list of hvc_struct instances from inserts and removals during + * list traversal. + */ +static DEFINE_SPINLOCK(hvc_structs_lock); + +/* + * This value is used to assign a tty->index value to a hvc_struct based + * upon order of exposure via hvc_probe(), when we can not match it to + * a console candidate registered with hvc_instantiate(). + */ +static int last_hvc = -1; + +/* + * Do not call this function with either the hvc_structs_lock or the hvc_struct + * lock held.  
If successful, this function increments the kref reference + * count against the target hvc_struct so it should be released when finished. + */ +static struct hvc_struct *hvc_get_by_index(int index) +{ +	struct hvc_struct *hp; +	unsigned long flags; + +	spin_lock(&hvc_structs_lock); + +	list_for_each_entry(hp, &hvc_structs, next) { +		spin_lock_irqsave(&hp->lock, flags); +		if (hp->index == index) { +			tty_port_get(&hp->port); +			spin_unlock_irqrestore(&hp->lock, flags); +			spin_unlock(&hvc_structs_lock); +			return hp; +		} +		spin_unlock_irqrestore(&hp->lock, flags); +	} +	hp = NULL; + +	spin_unlock(&hvc_structs_lock); +	return hp; +} + + +/* + * Initial console vtermnos for console API usage prior to full console + * initialization.  Any vty adapter outside this range will not have usable + * console interfaces but can still be used as a tty device.  This has to be + * static because kmalloc will not work during early console init. + */ +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; +static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = +	{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; + +/* + * Console APIs, NOT TTY.  These APIs are available immediately when + * hvc_console_setup() finds adapters. + */ + +static void hvc_console_print(struct console *co, const char *b, +			      unsigned count) +{ +	char c[N_OUTBUF] __ALIGNED__; +	unsigned i = 0, n = 0; +	int r, donecr = 0, index = co->index; + +	/* Console access attempt outside of acceptable console range. */ +	if (index >= MAX_NR_HVC_CONSOLES) +		return; + +	/* This console adapter was removed so it is not usable. */ +	if (vtermnos[index] == -1) +		return; + +	while (count > 0 || i > 0) { +		if (count > 0 && i < sizeof(c)) { +			if (b[n] == '\n' && !donecr) { +				c[i++] = '\r'; +				donecr = 1; +			} else { +				c[i++] = b[n++]; +				donecr = 0; +				--count; +			} +		} else { +			r = cons_ops[index]->put_chars(vtermnos[index], c, i); +			if (r <= 0) { +				/* throw away characters on error +				 * but spin in case of -EAGAIN */ +				if (r != -EAGAIN) +					i = 0; +			} else if (r > 0) { +				i -= r; +				if (i > 0) +					memmove(c, c+r, i); +			} +		} +	} +} + +static struct tty_driver *hvc_console_device(struct console *c, int *index) +{ +	if (vtermnos[c->index] == -1) +		return NULL; + +	*index = c->index; +	return hvc_driver; +} + +static int hvc_console_setup(struct console *co, char *options) +{	 +	if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) +		return -ENODEV; + +	if (vtermnos[co->index] == -1) +		return -ENODEV; + +	return 0; +} + +static struct console hvc_console = { +	.name		= "hvc", +	.write		= hvc_console_print, +	.device		= hvc_console_device, +	.setup		= hvc_console_setup, +	.flags		= CON_PRINTBUFFER, +	.index		= -1, +}; + +/* + * Early console initialization.  Precedes driver initialization. + * + * (1) we are first, and the user specified another driver + * -- index will remain -1 + * (2) we are first and the user specified no driver + * -- index will be set to 0, then we will fail setup. + * (3)  we are first and the user specified our driver + * -- index will be set to user specified driver, and we will fail + * (4) we are after driver, and this initcall will register us + * -- if the user didn't specify a driver then the console will match + * + * Note that for cases 2 and 3, we will match later when the io driver + * calls hvc_instantiate() and call register again. 
+ */ +static int __init hvc_console_init(void) +{ +	register_console(&hvc_console); +	return 0; +} +console_initcall(hvc_console_init); + +/* callback when the kboject ref count reaches zero. */ +static void hvc_port_destruct(struct tty_port *port) +{ +	struct hvc_struct *hp = container_of(port, struct hvc_struct, port); +	unsigned long flags; + +	spin_lock(&hvc_structs_lock); + +	spin_lock_irqsave(&hp->lock, flags); +	list_del(&(hp->next)); +	spin_unlock_irqrestore(&hp->lock, flags); + +	spin_unlock(&hvc_structs_lock); + +	kfree(hp); +} + +static void hvc_check_console(int index) +{ +	/* Already enabled, bail out */ +	if (hvc_console.flags & CON_ENABLED) +		return; + + 	/* If this index is what the user requested, then register +	 * now (setup won't fail at this point).  It's ok to just +	 * call register again if previously .setup failed. +	 */ +	if (index == hvc_console.index) +		register_console(&hvc_console); +} + +/* + * hvc_instantiate() is an early console discovery method which locates + * consoles * prior to the vio subsystem discovering them.  Hotplugged + * vty adapters do NOT get an hvc_instantiate() callback since they + * appear after early console init. + */ +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) +{ +	struct hvc_struct *hp; + +	if (index < 0 || index >= MAX_NR_HVC_CONSOLES) +		return -1; + +	if (vtermnos[index] != -1) +		return -1; + +	/* make sure no no tty has been registered in this index */ +	hp = hvc_get_by_index(index); +	if (hp) { +		tty_port_put(&hp->port); +		return -1; +	} + +	vtermnos[index] = vtermno; +	cons_ops[index] = ops; + +	/* reserve all indices up to and including this index */ +	if (last_hvc < index) +		last_hvc = index; + +	/* check if we need to re-register the kernel console */ +	hvc_check_console(index); + +	return 0; +} +EXPORT_SYMBOL_GPL(hvc_instantiate); + +/* Wake the sleeping khvcd */ +void hvc_kick(void) +{ +	hvc_kicked = 1; +	wake_up_process(hvc_task); +} +EXPORT_SYMBOL_GPL(hvc_kick); + +static void hvc_unthrottle(struct tty_struct *tty) +{ +	hvc_kick(); +} + +static int hvc_install(struct tty_driver *driver, struct tty_struct *tty) +{ +	struct hvc_struct *hp; +	int rc; + +	/* Auto increments kref reference if found. */ +	if (!(hp = hvc_get_by_index(tty->index))) +		return -ENODEV; + +	tty->driver_data = hp; + +	rc = tty_port_install(&hp->port, driver, tty); +	if (rc) +		tty_port_put(&hp->port); +	return rc; +} + +/* + * The TTY interface won't be used until after the vio layer has exposed the vty + * adapter to the kernel. + */ +static int hvc_open(struct tty_struct *tty, struct file * filp) +{ +	struct hvc_struct *hp = tty->driver_data; +	unsigned long flags; +	int rc = 0; + +	spin_lock_irqsave(&hp->port.lock, flags); +	/* Check and then increment for fast path open. */ +	if (hp->port.count++ > 0) { +		spin_unlock_irqrestore(&hp->port.lock, flags); +		hvc_kick(); +		return 0; +	} /* else count == 0 */ +	spin_unlock_irqrestore(&hp->port.lock, flags); + +	tty_port_tty_set(&hp->port, tty); + +	if (hp->ops->notifier_add) +		rc = hp->ops->notifier_add(hp, hp->data); + +	/* +	 * If the notifier fails we return an error.  The tty layer +	 * will call hvc_close() after a failed open but we don't want to clean +	 * up there so we'll clean up here and clear out the previously set +	 * tty fields and return the kref reference. 
+	 */ +	if (rc) { +		tty_port_tty_set(&hp->port, NULL); +		tty->driver_data = NULL; +		tty_port_put(&hp->port); +		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); +	} else +		/* We are ready... raise DTR/RTS */ +		if (C_BAUD(tty)) +			if (hp->ops->dtr_rts) +				hp->ops->dtr_rts(hp, 1); + +	/* Force wakeup of the polling thread */ +	hvc_kick(); + +	return rc; +} + +static void hvc_close(struct tty_struct *tty, struct file * filp) +{ +	struct hvc_struct *hp; +	unsigned long flags; + +	if (tty_hung_up_p(filp)) +		return; + +	/* +	 * No driver_data means that this close was issued after a failed +	 * hvc_open by the tty layer's release_dev() function and we can just +	 * exit cleanly because the kref reference wasn't made. +	 */ +	if (!tty->driver_data) +		return; + +	hp = tty->driver_data; + +	spin_lock_irqsave(&hp->port.lock, flags); + +	if (--hp->port.count == 0) { +		spin_unlock_irqrestore(&hp->port.lock, flags); +		/* We are done with the tty pointer now. */ +		tty_port_tty_set(&hp->port, NULL); + +		if (C_HUPCL(tty)) +			if (hp->ops->dtr_rts) +				hp->ops->dtr_rts(hp, 0); + +		if (hp->ops->notifier_del) +			hp->ops->notifier_del(hp, hp->data); + +		/* cancel pending tty resize work */ +		cancel_work_sync(&hp->tty_resize); + +		/* +		 * Chain calls chars_in_buffer() and returns immediately if +		 * there is no buffered data otherwise sleeps on a wait queue +		 * waking periodically to check chars_in_buffer(). +		 */ +		tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT); +	} else { +		if (hp->port.count < 0) +			printk(KERN_ERR "hvc_close %X: oops, count is %d\n", +				hp->vtermno, hp->port.count); +		spin_unlock_irqrestore(&hp->port.lock, flags); +	} +} + +static void hvc_cleanup(struct tty_struct *tty) +{ +	struct hvc_struct *hp = tty->driver_data; + +	tty_port_put(&hp->port); +} + +static void hvc_hangup(struct tty_struct *tty) +{ +	struct hvc_struct *hp = tty->driver_data; +	unsigned long flags; + +	if (!hp) +		return; + +	/* cancel pending tty resize work */ +	cancel_work_sync(&hp->tty_resize); + +	spin_lock_irqsave(&hp->port.lock, flags); + +	/* +	 * The N_TTY line discipline has problems such that in a close vs +	 * open->hangup case this can be called after the final close so prevent +	 * that from happening for now. +	 */ +	if (hp->port.count <= 0) { +		spin_unlock_irqrestore(&hp->port.lock, flags); +		return; +	} + +	hp->port.count = 0; +	spin_unlock_irqrestore(&hp->port.lock, flags); +	tty_port_tty_set(&hp->port, NULL); + +	hp->n_outbuf = 0; + +	if (hp->ops->notifier_hangup) +		hp->ops->notifier_hangup(hp, hp->data); +} + +/* + * Push buffered characters whether they were just recently buffered or waiting + * on a blocked hypervisor.  Call this function with hp->lock held. + */ +static int hvc_push(struct hvc_struct *hp) +{ +	int n; + +	n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf); +	if (n <= 0) { +		if (n == 0 || n == -EAGAIN) { +			hp->do_wakeup = 1; +			return 0; +		} +		/* throw away output on error; this happens when +		   there is no session connected to the vterm. */ +		hp->n_outbuf = 0; +	} else +		hp->n_outbuf -= n; +	if (hp->n_outbuf > 0) +		memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); +	else +		hp->do_wakeup = 1; + +	return n; +} + +static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ +	struct hvc_struct *hp = tty->driver_data; +	unsigned long flags; +	int rsize, written = 0; + +	/* This write was probably executed during a tty close. 
*/ +	if (!hp) +		return -EPIPE; + +	/* FIXME what's this (unprotected) check for? */ +	if (hp->port.count <= 0) +		return -EIO; + +	spin_lock_irqsave(&hp->lock, flags); + +	/* Push pending writes */ +	if (hp->n_outbuf > 0) +		hvc_push(hp); + +	while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { +		if (rsize > count) +			rsize = count; +		memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); +		count -= rsize; +		buf += rsize; +		hp->n_outbuf += rsize; +		written += rsize; +		hvc_push(hp); +	} +	spin_unlock_irqrestore(&hp->lock, flags); + +	/* +	 * Racy, but harmless, kick thread if there is still pending data. +	 */ +	if (hp->n_outbuf) +		hvc_kick(); + +	return written; +} + +/** + * hvc_set_winsz() - Resize the hvc tty terminal window. + * @work:	work structure. + * + * The routine shall not be called within an atomic context because it + * might sleep. + * + * Locking:	hp->lock + */ +static void hvc_set_winsz(struct work_struct *work) +{ +	struct hvc_struct *hp; +	unsigned long hvc_flags; +	struct tty_struct *tty; +	struct winsize ws; + +	hp = container_of(work, struct hvc_struct, tty_resize); + +	tty = tty_port_tty_get(&hp->port); +	if (!tty) +		return; + +	spin_lock_irqsave(&hp->lock, hvc_flags); +	ws = hp->ws; +	spin_unlock_irqrestore(&hp->lock, hvc_flags); + +	tty_do_resize(tty, &ws); +	tty_kref_put(tty); +} + +/* + * This is actually a contract between the driver and the tty layer outlining + * how much write room the driver can guarantee will be sent OR BUFFERED.  This + * driver MUST honor the return value. + */ +static int hvc_write_room(struct tty_struct *tty) +{ +	struct hvc_struct *hp = tty->driver_data; + +	if (!hp) +		return 0; + +	return hp->outbuf_size - hp->n_outbuf; +} + +static int hvc_chars_in_buffer(struct tty_struct *tty) +{ +	struct hvc_struct *hp = tty->driver_data; + +	if (!hp) +		return 0; +	return hp->n_outbuf; +} + +/* + * timeout will vary between the MIN and MAX values defined here.  By default + * and during console activity we will use a default MIN_TIMEOUT of 10.  When + * the console is idle, we increase the timeout value on each pass through + * msleep until we reach the max.  This may be noticeable as a brief (average + * one second) delay on the console before the console responds to input when + * there has been no input for some time. + */ +#define MIN_TIMEOUT		(10) +#define MAX_TIMEOUT		(2000) +static u32 timeout = MIN_TIMEOUT; + +#define HVC_POLL_READ	0x00000001 +#define HVC_POLL_WRITE	0x00000002 + +int hvc_poll(struct hvc_struct *hp) +{ +	struct tty_struct *tty; +	int i, n, poll_mask = 0; +	char buf[N_INBUF] __ALIGNED__; +	unsigned long flags; +	int read_total = 0; +	int written_total = 0; + +	spin_lock_irqsave(&hp->lock, flags); + +	/* Push pending writes */ +	if (hp->n_outbuf > 0) +		written_total = hvc_push(hp); + +	/* Reschedule us if still some write pending */ +	if (hp->n_outbuf > 0) { +		poll_mask |= HVC_POLL_WRITE; +		/* If hvc_push() was not able to write, sleep a few msecs */ +		timeout = (written_total) ? 0 : MIN_TIMEOUT; +	} + +	/* No tty attached, just skip */ +	tty = tty_port_tty_get(&hp->port); +	if (tty == NULL) +		goto bail; + +	/* Now check if we can get data (are we throttled ?) 
*/ +	if (test_bit(TTY_THROTTLED, &tty->flags)) +		goto throttled; + +	/* If we aren't notifier driven and aren't throttled, we always +	 * request a reschedule +	 */ +	if (!hp->irq_requested) +		poll_mask |= HVC_POLL_READ; + +	/* Read data if any */ +	for (;;) { +		int count = tty_buffer_request_room(&hp->port, N_INBUF); + +		/* If flip is full, just reschedule a later read */ +		if (count == 0) { +			poll_mask |= HVC_POLL_READ; +			break; +		} + +		n = hp->ops->get_chars(hp->vtermno, buf, count); +		if (n <= 0) { +			/* Hangup the tty when disconnected from host */ +			if (n == -EPIPE) { +				spin_unlock_irqrestore(&hp->lock, flags); +				tty_hangup(tty); +				spin_lock_irqsave(&hp->lock, flags); +			} else if ( n == -EAGAIN ) { +				/* +				 * Some back-ends can only ensure a certain min +				 * num of bytes read, which may be > 'count'. +				 * Let the tty clear the flip buff to make room. +				 */ +				poll_mask |= HVC_POLL_READ; +			} +			break; +		} +		for (i = 0; i < n; ++i) { +#ifdef CONFIG_MAGIC_SYSRQ +			if (hp->index == hvc_console.index) { +				/* Handle the SysRq Hack */ +				/* XXX should support a sequence */ +				if (buf[i] == '\x0f') {	/* ^O */ +					/* if ^O is pressed again, reset +					 * sysrq_pressed and flip ^O char */ +					sysrq_pressed = !sysrq_pressed; +					if (sysrq_pressed) +						continue; +				} else if (sysrq_pressed) { +					handle_sysrq(buf[i]); +					sysrq_pressed = 0; +					continue; +				} +			} +#endif /* CONFIG_MAGIC_SYSRQ */ +			tty_insert_flip_char(&hp->port, buf[i], 0); +		} + +		read_total += n; +	} + throttled: +	/* Wakeup write queue if necessary */ +	if (hp->do_wakeup) { +		hp->do_wakeup = 0; +		tty_wakeup(tty); +	} + bail: +	spin_unlock_irqrestore(&hp->lock, flags); + +	if (read_total) { +		/* Activity is occurring, so reset the polling backoff value to +		   a minimum for performance. */ +		timeout = MIN_TIMEOUT; + +		tty_flip_buffer_push(&hp->port); +	} +	tty_kref_put(tty); + +	return poll_mask; +} +EXPORT_SYMBOL_GPL(hvc_poll); + +/** + * __hvc_resize() - Update terminal window size information. + * @hp:		HVC console pointer + * @ws:		Terminal window size structure + * + * Stores the specified window size information in the hvc structure of @hp. + * The function schedule the tty resize update. + * + * Locking:	Locking free; the function MUST be called holding hp->lock + */ +void __hvc_resize(struct hvc_struct *hp, struct winsize ws) +{ +	hp->ws = ws; +	schedule_work(&hp->tty_resize); +} +EXPORT_SYMBOL_GPL(__hvc_resize); + +/* + * This kthread is either polling or interrupt driven.  This is determined by + * calling hvc_poll() who determines whether a console adapter support + * interrupts. 
+ */ +static int khvcd(void *unused) +{ +	int poll_mask; +	struct hvc_struct *hp; + +	set_freezable(); +	do { +		poll_mask = 0; +		hvc_kicked = 0; +		try_to_freeze(); +		wmb(); +		if (!cpus_are_in_xmon()) { +			spin_lock(&hvc_structs_lock); +			list_for_each_entry(hp, &hvc_structs, next) { +				poll_mask |= hvc_poll(hp); +			} +			spin_unlock(&hvc_structs_lock); +		} else +			poll_mask |= HVC_POLL_READ; +		if (hvc_kicked) +			continue; +		set_current_state(TASK_INTERRUPTIBLE); +		if (!hvc_kicked) { +			if (poll_mask == 0) +				schedule(); +			else { +				unsigned long j_timeout; + +				if (timeout < MAX_TIMEOUT) +					timeout += (timeout >> 6) + 1; + +				/* +				 * We don't use msleep_interruptible otherwise +				 * "kick" will fail to wake us up +				 */ +				j_timeout = msecs_to_jiffies(timeout) + 1; +				schedule_timeout_interruptible(j_timeout); +			} +		} +		__set_current_state(TASK_RUNNING); +	} while (!kthread_should_stop()); + +	return 0; +} + +static int hvc_tiocmget(struct tty_struct *tty) +{ +	struct hvc_struct *hp = tty->driver_data; + +	if (!hp || !hp->ops->tiocmget) +		return -EINVAL; +	return hp->ops->tiocmget(hp); +} + +static int hvc_tiocmset(struct tty_struct *tty, +			unsigned int set, unsigned int clear) +{ +	struct hvc_struct *hp = tty->driver_data; + +	if (!hp || !hp->ops->tiocmset) +		return -EINVAL; +	return hp->ops->tiocmset(hp, set, clear); +} + +#ifdef CONFIG_CONSOLE_POLL +static int hvc_poll_init(struct tty_driver *driver, int line, char *options) +{ +	return 0; +} + +static int hvc_poll_get_char(struct tty_driver *driver, int line) +{ +	struct tty_struct *tty = driver->ttys[0]; +	struct hvc_struct *hp = tty->driver_data; +	int n; +	char ch; + +	n = hp->ops->get_chars(hp->vtermno, &ch, 1); + +	if (n == 0) +		return NO_POLL_CHAR; + +	return ch; +} + +static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch) +{ +	struct tty_struct *tty = driver->ttys[0]; +	struct hvc_struct *hp = tty->driver_data; +	int n; + +	do { +		n = hp->ops->put_chars(hp->vtermno, &ch, 1); +	} while (n <= 0); +} +#endif + +static const struct tty_operations hvc_ops = { +	.install = hvc_install, +	.open = hvc_open, +	.close = hvc_close, +	.cleanup = hvc_cleanup, +	.write = hvc_write, +	.hangup = hvc_hangup, +	.unthrottle = hvc_unthrottle, +	.write_room = hvc_write_room, +	.chars_in_buffer = hvc_chars_in_buffer, +	.tiocmget = hvc_tiocmget, +	.tiocmset = hvc_tiocmset, +#ifdef CONFIG_CONSOLE_POLL +	.poll_init = hvc_poll_init, +	.poll_get_char = hvc_poll_get_char, +	.poll_put_char = hvc_poll_put_char, +#endif +}; + +static const struct tty_port_operations hvc_port_ops = { +	.destruct = hvc_port_destruct, +}; + +struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, +			     const struct hv_ops *ops, +			     int outbuf_size) +{ +	struct hvc_struct *hp; +	int i; + +	/* We wait until a driver actually comes along */ +	if (atomic_inc_not_zero(&hvc_needs_init)) { +		int err = hvc_init(); +		if (err) +			return ERR_PTR(err); +	} + +	hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, +			GFP_KERNEL); +	if (!hp) +		return ERR_PTR(-ENOMEM); + +	hp->vtermno = vtermno; +	hp->data = data; +	hp->ops = ops; +	hp->outbuf_size = outbuf_size; +	hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; + +	tty_port_init(&hp->port); +	hp->port.ops = &hvc_port_ops; + +	INIT_WORK(&hp->tty_resize, hvc_set_winsz); +	spin_lock_init(&hp->lock); +	spin_lock(&hvc_structs_lock); + +	/* +	 * find index to use: +	 * see if this vterm id matches one registered for console. 
+	 */ +	for (i=0; i < MAX_NR_HVC_CONSOLES; i++) +		if (vtermnos[i] == hp->vtermno && +		    cons_ops[i] == hp->ops) +			break; + +	/* no matching slot, just use a counter */ +	if (i >= MAX_NR_HVC_CONSOLES) +		i = ++last_hvc; + +	hp->index = i; +	cons_ops[i] = ops; +	vtermnos[i] = vtermno; + +	list_add_tail(&(hp->next), &hvc_structs); +	spin_unlock(&hvc_structs_lock); + +	/* check if we need to re-register the kernel console */ +	hvc_check_console(i); + +	return hp; +} +EXPORT_SYMBOL_GPL(hvc_alloc); + +int hvc_remove(struct hvc_struct *hp) +{ +	unsigned long flags; +	struct tty_struct *tty; + +	tty = tty_port_tty_get(&hp->port); + +	spin_lock_irqsave(&hp->lock, flags); +	if (hp->index < MAX_NR_HVC_CONSOLES) { +		console_lock(); +		vtermnos[hp->index] = -1; +		cons_ops[hp->index] = NULL; +		console_unlock(); +	} + +	/* Don't whack hp->irq because tty_hangup() will need to free the irq. */ + +	spin_unlock_irqrestore(&hp->lock, flags); + +	/* +	 * We 'put' the instance that was grabbed when the kref instance +	 * was initialized using kref_init().  Let the last holder of this +	 * kref cause it to be removed, which will probably be the tty_vhangup +	 * below. +	 */ +	tty_port_put(&hp->port); + +	/* +	 * This function call will auto chain call hvc_hangup. +	 */ +	if (tty) { +		tty_vhangup(tty); +		tty_kref_put(tty); +	} +	return 0; +} +EXPORT_SYMBOL_GPL(hvc_remove); + +/* Driver initialization: called as soon as someone uses hvc_alloc(). */ +static int hvc_init(void) +{ +	struct tty_driver *drv; +	int err; + +	/* We need more than hvc_count adapters due to hotplug additions. */ +	drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS); +	if (!drv) { +		err = -ENOMEM; +		goto out; +	} + +	drv->driver_name = "hvc"; +	drv->name = "hvc"; +	drv->major = HVC_MAJOR; +	drv->minor_start = HVC_MINOR; +	drv->type = TTY_DRIVER_TYPE_SYSTEM; +	drv->init_termios = tty_std_termios; +	drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; +	tty_set_operations(drv, &hvc_ops); + +	/* Always start the kthread because there can be hotplug vty adapters +	 * added later. */ +	hvc_task = kthread_run(khvcd, NULL, "khvcd"); +	if (IS_ERR(hvc_task)) { +		printk(KERN_ERR "Couldn't create kthread for console.\n"); +		err = PTR_ERR(hvc_task); +		goto put_tty; +	} + +	err = tty_register_driver(drv); +	if (err) { +		printk(KERN_ERR "Couldn't register hvc console driver\n"); +		goto stop_thread; +	} + +	/* +	 * Make sure tty is fully registered before allowing it to be +	 * found by hvc_console_device. +	 */ +	smp_mb(); +	hvc_driver = drv; +	return 0; + +stop_thread: +	kthread_stop(hvc_task); +	hvc_task = NULL; +put_tty: +	put_tty_driver(drv); +out: +	return err; +} + +/* This isn't particularly necessary due to this being a console driver + * but it is nice to be thorough. + */ +static void __exit hvc_exit(void) +{ +	if (hvc_driver) { +		kthread_stop(hvc_task); + +		tty_unregister_driver(hvc_driver); +		/* return tty_struct instances allocated in hvc_init(). */ +		put_tty_driver(hvc_driver); +		unregister_console(&hvc_console); +	} +} +module_exit(hvc_exit); diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h new file mode 100644 index 00000000000..91310198082 --- /dev/null +++ b/drivers/tty/hvc/hvc_console.h @@ -0,0 +1,124 @@ +/* + * hvc_console.h + * Copyright (C) 2005 IBM Corporation + * + * Author(s): + * 	Ryan S. 
Arnold <rsa@us.ibm.com> + * + * hvc_console header information: + *      moved here from arch/powerpc/include/asm/hvconsole.h + *      and drivers/char/hvc_console.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +#ifndef HVC_CONSOLE_H +#define HVC_CONSOLE_H +#include <linux/kref.h> +#include <linux/tty.h> +#include <linux/spinlock.h> + +/* + * This is the max number of console adapters that can/will be found as + * console devices on first stage console init.  Any number beyond this range + * can't be used as a console device but is still a valid tty device. + */ +#define MAX_NR_HVC_CONSOLES	16 + +/* + * The Linux TTY code does not support dynamic addition of tty derived devices + * so we need to know how many tty devices we might need when space is allocated + * for the tty device.  Since this driver supports hotplug of vty adapters we + * need to make sure we have enough allocated. + */ +#define HVC_ALLOC_TTY_ADAPTERS	8 + +struct hvc_struct { +	struct tty_port port; +	spinlock_t lock; +	int index; +	int do_wakeup; +	char *outbuf; +	int outbuf_size; +	int n_outbuf; +	uint32_t vtermno; +	const struct hv_ops *ops; +	int irq_requested; +	int data; +	struct winsize ws; +	struct work_struct tty_resize; +	struct list_head next; +}; + +/* implemented by a low level driver */ +struct hv_ops { +	int (*get_chars)(uint32_t vtermno, char *buf, int count); +	int (*put_chars)(uint32_t vtermno, const char *buf, int count); + +	/* Callbacks for notification. 
Called in open, close and hangup */ +	int (*notifier_add)(struct hvc_struct *hp, int irq); +	void (*notifier_del)(struct hvc_struct *hp, int irq); +	void (*notifier_hangup)(struct hvc_struct *hp, int irq); + +	/* tiocmget/set implementation */ +	int (*tiocmget)(struct hvc_struct *hp); +	int (*tiocmset)(struct hvc_struct *hp, unsigned int set, unsigned int clear); + +	/* Callbacks to handle tty ports */ +	void (*dtr_rts)(struct hvc_struct *hp, int raise); +}; + +/* Register a vterm and a slot index for use as a console (console_init) */ +extern int hvc_instantiate(uint32_t vtermno, int index, +			   const struct hv_ops *ops); + +/* register a vterm for hvc tty operation (module_init or hotplug add) */ +extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data, +				     const struct hv_ops *ops, int outbuf_size); +/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ +extern int hvc_remove(struct hvc_struct *hp); + +/* data available */ +int hvc_poll(struct hvc_struct *hp); +void hvc_kick(void); + +/* Resize hvc tty terminal window */ +extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws); + +static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws) +{ +	unsigned long flags; + +	spin_lock_irqsave(&hp->lock, flags); +	__hvc_resize(hp, ws); +	spin_unlock_irqrestore(&hp->lock, flags); +} + +/* default notifier for irq based notification */ +extern int notifier_add_irq(struct hvc_struct *hp, int data); +extern void notifier_del_irq(struct hvc_struct *hp, int data); +extern void notifier_hangup_irq(struct hvc_struct *hp, int data); + + +#if defined(CONFIG_XMON) && defined(CONFIG_SMP) +#include <asm/xmon.h> +#else +static inline int cpus_are_in_xmon(void) +{ +	return 0; +} +#endif + +#endif // HVC_CONSOLE_H diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c new file mode 100644 index 00000000000..809920d80a6 --- /dev/null +++ b/drivers/tty/hvc/hvc_dcc.c @@ -0,0 +1,89 @@ +/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ */ + +#include <linux/init.h> + +#include <asm/dcc.h> +#include <asm/processor.h> + +#include "hvc_console.h" + +/* DCC Status Bits */ +#define DCC_STATUS_RX		(1 << 30) +#define DCC_STATUS_TX		(1 << 29) + +static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) +{ +	int i; + +	for (i = 0; i < count; i++) { +		while (__dcc_getstatus() & DCC_STATUS_TX) +			cpu_relax(); + +		__dcc_putchar(buf[i]); +	} + +	return count; +} + +static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count) +{ +	int i; + +	for (i = 0; i < count; ++i) +		if (__dcc_getstatus() & DCC_STATUS_RX) +			buf[i] = __dcc_getchar(); +		else +			break; + +	return i; +} + +static bool hvc_dcc_check(void) +{ +	unsigned long time = jiffies + (HZ / 10); + +	/* Write a test character to check if it is handled */ +	__dcc_putchar('\n'); + +	while (time_is_after_jiffies(time)) { +		if (!(__dcc_getstatus() & DCC_STATUS_TX)) +			return true; +	} + +	return false; +} + +static const struct hv_ops hvc_dcc_get_put_ops = { +	.get_chars = hvc_dcc_get_chars, +	.put_chars = hvc_dcc_put_chars, +}; + +static int __init hvc_dcc_console_init(void) +{ +	if (!hvc_dcc_check()) +		return -ENODEV; + +	hvc_instantiate(0, 0, &hvc_dcc_get_put_ops); +	return 0; +} +console_initcall(hvc_dcc_console_init); + +static int __init hvc_dcc_init(void) +{ +	if (!hvc_dcc_check()) +		return -ENODEV; + +	hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128); +	return 0; +} +device_initcall(hvc_dcc_init); diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c new file mode 100644 index 00000000000..c9adb0559f6 --- /dev/null +++ b/drivers/tty/hvc/hvc_irq.c @@ -0,0 +1,49 @@ +/* + * Copyright IBM Corp. 2001,2008 + * + * This file contains the IRQ specific code for hvc_console + * + */ + +#include <linux/interrupt.h> + +#include "hvc_console.h" + +static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance) +{ +	/* if hvc_poll request a repoll, then kick the hvcd thread */ +	if (hvc_poll(dev_instance)) +		hvc_kick(); +	return IRQ_HANDLED; +} + +/* + * For IRQ based systems these callbacks can be used + */ +int notifier_add_irq(struct hvc_struct *hp, int irq) +{ +	int rc; + +	if (!irq) { +		hp->irq_requested = 0; +		return 0; +	} +	rc = request_irq(irq, hvc_handle_interrupt, 0, +			   "hvc_console", hp); +	if (!rc) +		hp->irq_requested = 1; +	return rc; +} + +void notifier_del_irq(struct hvc_struct *hp, int irq) +{ +	if (!hp->irq_requested) +		return; +	free_irq(irq, hp); +	hp->irq_requested = 0; +} + +void notifier_hangup_irq(struct hvc_struct *hp, int irq) +{ +	notifier_del_irq(hp, irq); +} diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c new file mode 100644 index 00000000000..ea74460f363 --- /dev/null +++ b/drivers/tty/hvc/hvc_iucv.c @@ -0,0 +1,1461 @@ +/* + * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver + * + * This HVC device driver provides terminal access using + * z/VM IUCV communication paths. + * + * Copyright IBM Corp. 
2008, 2009 + * + * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ +#define KMSG_COMPONENT		"hvc_iucv" +#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt + +#include <linux/types.h> +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mempool.h> +#include <linux/moduleparam.h> +#include <linux/tty.h> +#include <linux/wait.h> +#include <net/iucv/iucv.h> + +#include "hvc_console.h" + + +/* General device driver settings */ +#define HVC_IUCV_MAGIC		0xc9e4c3e5 +#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS +#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) + +/* IUCV TTY message  */ +#define MSG_VERSION		0x02	/* Message version */ +#define MSG_TYPE_ERROR		0x01	/* Error message */ +#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */ +#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */ +#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */ +#define MSG_TYPE_DATA		0x10	/* Terminal data */ + +struct iucv_tty_msg { +	u8	version;		/* Message version */ +	u8	type;			/* Message type */ +#define MSG_MAX_DATALEN		((u16)(~0)) +	u16	datalen;		/* Payload length */ +	u8	data[];			/* Payload buffer */ +} __attribute__((packed)); +#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data)) + +enum iucv_state_t { +	IUCV_DISCONN	= 0, +	IUCV_CONNECTED	= 1, +	IUCV_SEVERED	= 2, +}; + +enum tty_state_t { +	TTY_CLOSED	= 0, +	TTY_OPENED	= 1, +}; + +struct hvc_iucv_private { +	struct hvc_struct	*hvc;		/* HVC struct reference */ +	u8			srv_name[8];	/* IUCV service name (ebcdic) */ +	unsigned char		is_console;	/* Linux console usage flag */ +	enum iucv_state_t	iucv_state;	/* IUCV connection status */ +	enum tty_state_t	tty_state;	/* TTY status */ +	struct iucv_path	*path;		/* IUCV path pointer */ +	spinlock_t		lock;		/* hvc_iucv_private lock */ +#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */ +	void			*sndbuf;	/* send buffer		  */ +	size_t			sndbuf_len;	/* length of send buffer  */ +#define QUEUE_SNDBUF_DELAY	(HZ / 25) +	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */ +	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */ +	struct list_head	tty_outqueue;	/* outgoing IUCV messages */ +	struct list_head	tty_inqueue;	/* incoming IUCV messages */ +	struct device		*dev;		/* device structure */ +	u8			info_path[16];	/* IUCV path info (dev attr) */ +}; + +struct iucv_tty_buffer { +	struct list_head	list;	/* list pointer */ +	struct iucv_message	msg;	/* store an IUCV message */ +	size_t			offset;	/* data buffer offset */ +	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */ +}; + +/* IUCV callback handler */ +static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]); +static void hvc_iucv_path_severed(struct iucv_path *, u8[16]); +static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); +static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); + + +/* Kernel module parameter: use one terminal device as default */ +static unsigned long hvc_iucv_devices = 1; + +/* Array of allocated hvc iucv tty lines... 
*/ +static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; +#define IUCV_HVC_CON_IDX	(0) +/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */ +#define MAX_VMID_FILTER		(500) +static size_t hvc_iucv_filter_size; +static void *hvc_iucv_filter; +static const char *hvc_iucv_filter_string; +static DEFINE_RWLOCK(hvc_iucv_filter_lock); + +/* Kmem cache and mempool for iucv_tty_buffer elements */ +static struct kmem_cache *hvc_iucv_buffer_cache; +static mempool_t *hvc_iucv_mempool; + +/* IUCV handler callback functions */ +static struct iucv_handler hvc_iucv_handler = { +	.path_pending  = hvc_iucv_path_pending, +	.path_severed  = hvc_iucv_path_severed, +	.message_complete = hvc_iucv_msg_complete, +	.message_pending  = hvc_iucv_msg_pending, +}; + + +/** + * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance. + * @num:	The HVC virtual terminal number (vtermno) + * + * This function returns the struct hvc_iucv_private instance that corresponds + * to the HVC virtual terminal number specified as parameter @num. + */ +static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) +{ +	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices)) +		return NULL; +	return hvc_iucv_table[num - HVC_IUCV_MAGIC]; +} + +/** + * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element. + * @size:	Size of the internal buffer used to store data. + * @flags:	Memory allocation flags passed to mempool. + * + * This function allocates a new struct iucv_tty_buffer element and, optionally, + * allocates an internal data buffer with the specified size @size. + * The internal data buffer is always allocated with GFP_DMA which is + * required for receiving and sending data with IUCV. + * Note: The total message size arises from the internal buffer size and the + *	 members of the iucv_tty_msg structure. + * The function returns NULL if memory allocation has failed. + */ +static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) +{ +	struct iucv_tty_buffer *bufp; + +	bufp = mempool_alloc(hvc_iucv_mempool, flags); +	if (!bufp) +		return NULL; +	memset(bufp, 0, sizeof(*bufp)); + +	if (size > 0) { +		bufp->msg.length = MSG_SIZE(size); +		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA); +		if (!bufp->mbuf) { +			mempool_free(bufp, hvc_iucv_mempool); +			return NULL; +		} +		bufp->mbuf->version = MSG_VERSION; +		bufp->mbuf->type    = MSG_TYPE_DATA; +		bufp->mbuf->datalen = (u16) size; +	} +	return bufp; +} + +/** + * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. + * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. + */ +static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) +{ +	kfree(bufp->mbuf); +	mempool_free(bufp, hvc_iucv_mempool); +} + +/** + * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. + * @list:	List containing struct iucv_tty_buffer elements. + */ +static void destroy_tty_buffer_list(struct list_head *list) +{ +	struct iucv_tty_buffer *ent, *next; + +	list_for_each_entry_safe(ent, next, list, list) { +		list_del(&ent->list); +		destroy_tty_buffer(ent); +	} +} + +/** + * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer. + * @priv:		Pointer to struct hvc_iucv_private + * @buf:		HVC buffer for writing received terminal data. + * @count:		HVC buffer size. + * @has_more_data:	Pointer to an int variable. 
+ * + * The function picks up pending messages from the input queue and receives + * the message data that is then written to the specified buffer @buf. + * If the buffer size @count is less than the data message size, the + * message is kept on the input queue and @has_more_data is set to 1. + * If all message data has been written, the message is removed from + * the input queue. + * + * The function returns the number of bytes written to the terminal, zero if + * there are no pending data messages available or if there is no established + * IUCV path. + * If the IUCV path has been severed, then -EPIPE is returned to cause a + * hang up (that is issued by the HVC layer). + */ +static int hvc_iucv_write(struct hvc_iucv_private *priv, +			  char *buf, int count, int *has_more_data) +{ +	struct iucv_tty_buffer *rb; +	int written; +	int rc; + +	/* immediately return if there is no IUCV connection */ +	if (priv->iucv_state == IUCV_DISCONN) +		return 0; + +	/* if the IUCV path has been severed, return -EPIPE to inform the +	 * HVC layer to hang up the tty device. */ +	if (priv->iucv_state == IUCV_SEVERED) +		return -EPIPE; + +	/* check if there are pending messages */ +	if (list_empty(&priv->tty_inqueue)) +		return 0; + +	/* receive an iucv message and flip data to the tty (ldisc) */ +	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); + +	written = 0; +	if (!rb->mbuf) { /* message not yet received ... */ +		/* allocate mem to store msg data; if no memory is available +		 * then leave the buffer on the list and re-try later */ +		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA); +		if (!rb->mbuf) +			return -ENOMEM; + +		rc = __iucv_message_receive(priv->path, &rb->msg, 0, +					    rb->mbuf, rb->msg.length, NULL); +		switch (rc) { +		case 0: /* Successful	    */ +			break; +		case 2:	/* No message found */ +		case 9: /* Message purged   */ +			break; +		default: +			written = -EIO; +		} +		/* remove buffer if an error has occurred or received data +		 * is not correct */ +		if (rc || (rb->mbuf->version != MSG_VERSION) || +			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen))) +			goto out_remove_buffer; +	} + +	switch (rb->mbuf->type) { +	case MSG_TYPE_DATA: +		written = min_t(int, rb->mbuf->datalen - rb->offset, count); +		memcpy(buf, rb->mbuf->data + rb->offset, written); +		if (written < (rb->mbuf->datalen - rb->offset)) { +			rb->offset += written; +			*has_more_data = 1; +			goto out_written; +		} +		break; + +	case MSG_TYPE_WINSIZE: +		if (rb->mbuf->datalen != sizeof(struct winsize)) +			break; +		/* The caller must ensure that the hvc is locked, which +		 * is the case when called from hvc_iucv_get_chars() */ +		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); +		break; + +	case MSG_TYPE_ERROR:	/* ignored ... */ +	case MSG_TYPE_TERMENV:	/* ignored ... */ +	case MSG_TYPE_TERMIOS:	/* ignored ... */ +		break; +	} + +out_remove_buffer: +	list_del(&rb->list); +	destroy_tty_buffer(rb); +	*has_more_data = !list_empty(&priv->tty_inqueue); + +out_written: +	return written; +} + +/** + * hvc_iucv_get_chars() - HVC get_chars operation. + * @vtermno:	HVC virtual terminal number. + * @buf:	Pointer to a buffer to store data + * @count:	Size of buffer available for writing + * + * The HVC thread calls this method to read characters from the back-end. + * If an IUCV communication path has been established, pending IUCV messages + * are received and data is copied into buffer @buf up to @count bytes. 
+ * + * Locking:	The routine gets called under an irqsave() spinlock; and + *		the routine locks the struct hvc_iucv_private->lock to call + *		helper functions. + */ +static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) +{ +	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); +	int written; +	int has_more_data; + +	if (count <= 0) +		return 0; + +	if (!priv) +		return -ENODEV; + +	spin_lock(&priv->lock); +	has_more_data = 0; +	written = hvc_iucv_write(priv, buf, count, &has_more_data); +	spin_unlock(&priv->lock); + +	/* if there are still messages on the queue... schedule another run */ +	if (has_more_data) +		hvc_kick(); + +	return written; +} + +/** + * hvc_iucv_queue() - Buffer terminal data for sending. + * @priv:	Pointer to struct hvc_iucv_private instance. + * @buf:	Buffer containing data to send. + * @count:	Size of buffer and amount of data to send. + * + * The function queues data for sending. To actually send the buffered data, + * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY). + * The function returns the number of data bytes that has been buffered. + * + * If the device is not connected, data is ignored and the function returns + * @count. + * If the buffer is full, the function returns 0. + * If an existing IUCV communicaton path has been severed, -EPIPE is returned + * (that can be passed to HVC layer to cause a tty hangup). + */ +static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf, +			  int count) +{ +	size_t len; + +	if (priv->iucv_state == IUCV_DISCONN) +		return count;			/* ignore data */ + +	if (priv->iucv_state == IUCV_SEVERED) +		return -EPIPE; + +	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len); +	if (!len) +		return 0; + +	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len); +	priv->sndbuf_len += len; + +	if (priv->iucv_state == IUCV_CONNECTED) +		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY); + +	return len; +} + +/** + * hvc_iucv_send() - Send an IUCV message containing terminal data. + * @priv:	Pointer to struct hvc_iucv_private instance. + * + * If an IUCV communication path has been established, the buffered output data + * is sent via an IUCV message and the number of bytes sent is returned. + * Returns 0 if there is no established IUCV communication path or + * -EPIPE if an existing IUCV communicaton path has been severed. + */ +static int hvc_iucv_send(struct hvc_iucv_private *priv) +{ +	struct iucv_tty_buffer *sb; +	int rc, len; + +	if (priv->iucv_state == IUCV_SEVERED) +		return -EPIPE; + +	if (priv->iucv_state == IUCV_DISCONN) +		return -EIO; + +	if (!priv->sndbuf_len) +		return 0; + +	/* allocate internal buffer to store msg data and also compute total +	 * message length */ +	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC); +	if (!sb) +		return -ENOMEM; + +	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len); +	sb->mbuf->datalen = (u16) priv->sndbuf_len; +	sb->msg.length = MSG_SIZE(sb->mbuf->datalen); + +	list_add_tail(&sb->list, &priv->tty_outqueue); + +	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, +				 (void *) sb->mbuf, sb->msg.length); +	if (rc) { +		/* drop the message here; however we might want to handle +		 * 0x03 (msg limit reached) by trying again... */ +		list_del(&sb->list); +		destroy_tty_buffer(sb); +	} +	len = priv->sndbuf_len; +	priv->sndbuf_len = 0; + +	return len; +} + +/** + * hvc_iucv_sndbuf_work() - Send buffered data over IUCV + * @work:	Work structure. 
+ * + * This work queue function sends buffered output data over IUCV and, + * if not all buffered data could be sent, reschedules itself. + */ +static void hvc_iucv_sndbuf_work(struct work_struct *work) +{ +	struct hvc_iucv_private *priv; + +	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work); +	if (!priv) +		return; + +	spin_lock_bh(&priv->lock); +	hvc_iucv_send(priv); +	spin_unlock_bh(&priv->lock); +} + +/** + * hvc_iucv_put_chars() - HVC put_chars operation. + * @vtermno:	HVC virtual terminal number. + * @buf:	Pointer to an buffer to read data from + * @count:	Size of buffer available for reading + * + * The HVC thread calls this method to write characters to the back-end. + * The function calls hvc_iucv_queue() to queue terminal data for sending. + * + * Locking:	The method gets called under an irqsave() spinlock; and + *		locks struct hvc_iucv_private->lock. + */ +static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) +{ +	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); +	int queued; + +	if (count <= 0) +		return 0; + +	if (!priv) +		return -ENODEV; + +	spin_lock(&priv->lock); +	queued = hvc_iucv_queue(priv, buf, count); +	spin_unlock(&priv->lock); + +	return queued; +} + +/** + * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time. + * @hp:	Pointer to the HVC device (struct hvc_struct) + * @id:	Additional data (originally passed to hvc_alloc): the index of an struct + *	hvc_iucv_private instance. + * + * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private + * instance that is derived from @id. Always returns 0. + * + * Locking:	struct hvc_iucv_private->lock, spin_lock_bh + */ +static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) +{ +	struct hvc_iucv_private *priv; + +	priv = hvc_iucv_get_private(id); +	if (!priv) +		return 0; + +	spin_lock_bh(&priv->lock); +	priv->tty_state = TTY_OPENED; +	spin_unlock_bh(&priv->lock); + +	return 0; +} + +/** + * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance. + * @priv:	Pointer to the struct hvc_iucv_private instance. + */ +static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) +{ +	destroy_tty_buffer_list(&priv->tty_outqueue); +	destroy_tty_buffer_list(&priv->tty_inqueue); + +	priv->tty_state = TTY_CLOSED; +	priv->iucv_state = IUCV_DISCONN; + +	priv->sndbuf_len = 0; +} + +/** + * tty_outqueue_empty() - Test if the tty outq is empty + * @priv:	Pointer to struct hvc_iucv_private instance. + */ +static inline int tty_outqueue_empty(struct hvc_iucv_private *priv) +{ +	int rc; + +	spin_lock_bh(&priv->lock); +	rc = list_empty(&priv->tty_outqueue); +	spin_unlock_bh(&priv->lock); + +	return rc; +} + +/** + * flush_sndbuf_sync() - Flush send buffer and wait for completion + * @priv:	Pointer to struct hvc_iucv_private instance. + * + * The routine cancels a pending sndbuf work, calls hvc_iucv_send() + * to flush any buffered terminal output data and waits for completion. + */ +static void flush_sndbuf_sync(struct hvc_iucv_private *priv) +{ +	int sync_wait; + +	cancel_delayed_work_sync(&priv->sndbuf_work); + +	spin_lock_bh(&priv->lock); +	hvc_iucv_send(priv);		/* force sending buffered data */ +	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? 
*/ +	spin_unlock_bh(&priv->lock); + +	if (sync_wait) +		wait_event_timeout(priv->sndbuf_waitq, +				   tty_outqueue_empty(priv), HZ/10); +} + +/** + * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up + * @priv:	Pointer to hvc_iucv_private structure + * + * This routine severs an existing IUCV communication path and hangs + * up the underlying HVC terminal device. + * The hang-up occurs only if an IUCV communication path is established; + * otherwise there is no need to hang up the terminal device. + * + * The IUCV HVC hang-up is separated into two steps: + * 1. After the IUCV path has been severed, the iucv_state is set to + *    IUCV_SEVERED. + * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the + *    IUCV_SEVERED state causes the tty hang-up in the HVC layer. + * + * If the tty has not yet been opened, clean up the hvc_iucv_private + * structure to allow re-connects. + * If the tty has been opened, let get_chars() return -EPIPE to signal + * the HVC layer to hang up the tty and, if so, wake up the HVC thread + * to call get_chars()... + * + * Special notes on hanging up a HVC terminal instantiated as console: + * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops) + *		2. do_tty_hangup() calls tty->ops->close() for console_filp + *			=> no hangup notifier is called by HVC (default) + *		2. hvc_close() returns because of tty_hung_up_p(filp) + *			=> no delete notifier is called! + * Finally, the back-end is not being notified, thus, the tty session is + * kept active (TTY_OPEN) to be ready for re-connects. + * + * Locking:	spin_lock(&priv->lock) w/o disabling bh + */ +static void hvc_iucv_hangup(struct hvc_iucv_private *priv) +{ +	struct iucv_path *path; + +	path = NULL; +	spin_lock(&priv->lock); +	if (priv->iucv_state == IUCV_CONNECTED) { +		path = priv->path; +		priv->path = NULL; +		priv->iucv_state = IUCV_SEVERED; +		if (priv->tty_state == TTY_CLOSED) +			hvc_iucv_cleanup(priv); +		else +			/* console is special (see above) */ +			if (priv->is_console) { +				hvc_iucv_cleanup(priv); +				priv->tty_state = TTY_OPENED; +			} else +				hvc_kick(); +	} +	spin_unlock(&priv->lock); + +	/* finally sever path (outside of priv->lock due to lock ordering) */ +	if (path) { +		iucv_path_sever(path, NULL); +		iucv_path_free(path); +	} +} + +/** + * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups. + * @hp:		Pointer to the HVC device (struct hvc_struct) + * @id:		Additional data (originally passed to hvc_alloc): + *		the index of an struct hvc_iucv_private instance. + * + * This routine notifies the HVC back-end that a tty hangup (carrier loss, + * virtual or otherwise) has occurred. + * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup()) + * to keep an existing IUCV communication path established. + * (Background: vhangup() is called from user space (by getty or login) to + *		disable writing to the tty by other applications). + * If the tty has been opened and an established IUCV path has been severed + * (we caused the tty hangup), the function calls hvc_iucv_cleanup(). + * + * Locking:	struct hvc_iucv_private->lock + */ +static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id) +{ +	struct hvc_iucv_private *priv; + +	priv = hvc_iucv_get_private(id); +	if (!priv) +		return; + +	flush_sndbuf_sync(priv); + +	spin_lock_bh(&priv->lock); +	/* NOTE: If the hangup was scheduled by ourself (from the iucv +	 *	 path_servered callback [IUCV_SEVERED]), we have to clean up +	 *	 our structure and to set state to TTY_CLOSED. 
+	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
+	 *	 ignore this hangup and keep an established IUCV path open...
+	 *	 (...the reason is that we are not able to connect back to the
+	 *	 client if we disconnect on hang up) */
+	priv->tty_state = TTY_CLOSED;
+
+	if (priv->iucv_state == IUCV_SEVERED)
+		hvc_iucv_cleanup(priv);
+	spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
+ * @hp:		Pointer to the HVC device (struct hvc_struct)
+ * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
+ *
+ * This routine notifies the HVC back-end to raise or lower DTR/RTS
+ * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS drops the
+ * IUCV connection (similar to hanging up a modem).
+ */
+static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
+{
+	struct hvc_iucv_private *priv;
+	struct iucv_path        *path;
+
+	/* Raising the DTR/RTS lines is ignored as IUCV connections can be
+	 * established at any time.
+	 */
+	if (raise)
+		return;
+
+	priv = hvc_iucv_get_private(hp->vtermno);
+	if (!priv)
+		return;
+
+	/* Lowering the DTR/RTS lines disconnects an established IUCV
+	 * connection.
+	 */
+	flush_sndbuf_sync(priv);
+
+	spin_lock_bh(&priv->lock);
+	path = priv->path;		/* save reference to IUCV path */
+	priv->path = NULL;
+	priv->iucv_state = IUCV_DISCONN;
+	spin_unlock_bh(&priv->lock);
+
+	/* Sever IUCV path outside of priv->lock due to lock ordering of:
+	 * priv->lock <--> iucv_table_lock */
+	if (path) {
+		iucv_path_sever(path, NULL);
+		iucv_path_free(path);
+	}
+}
+
+/**
+ * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
+ * @hp:		Pointer to the HVC device (struct hvc_struct)
+ * @id:		Additional data (originally passed to hvc_alloc):
+ *		the index of a struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC back-end that the last tty device fd has been
+ * closed.  The function cleans up tty resources.  The clean-up of the IUCV
+ * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
+ * control setting.
+ *
+ * Locking:	struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
+{
+	struct hvc_iucv_private *priv;
+
+	priv = hvc_iucv_get_private(id);
+	if (!priv)
+		return;
+
+	flush_sndbuf_sync(priv);
+
+	spin_lock_bh(&priv->lock);
+	destroy_tty_buffer_list(&priv->tty_outqueue);
+	destroy_tty_buffer_list(&priv->tty_inqueue);
+	priv->tty_state = TTY_CLOSED;
+	priv->sndbuf_len = 0;
+	spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
+ * @ipvmid:	Originating z/VM user ID (right padded with blanks)
+ *
+ * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
+ * non-zero.
+ */
+static int hvc_iucv_filter_connreq(u8 ipvmid[8])
+{
+	size_t i;
+
+	/* Note: default policy is ACCEPT if no filter is set */
+	if (!hvc_iucv_filter_size)
+		return 0;
+
+	for (i = 0; i < hvc_iucv_filter_size; i++)
+		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
+			return 0;
+	return 1;
+}
+
+/**
+ * hvc_iucv_path_pending() - IUCV handler to process a connection request.
+ * @path:	Pending path (struct iucv_path)
+ * @ipvmid:	z/VM system identifier of originator
+ * @ipuser:	User specified data for this path
+ *		(AF_IUCV: port/service name and originator port)
+ *
+ * The function uses the @ipuser data to determine if the pending path belongs
+ * to a terminal managed by this device driver.
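+ * The first 8 bytes of @ipuser carry the target IUCV service name: either a
+ * specific terminal ID (such as "lnxhvc0 ") or the wildcard "lnxhvc  ", which
+ * selects any terminal that is currently disconnected.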
+ * If the path belongs to this driver, ensure that the terminal is not accessed
+ * multiple times (only one connection to a terminal is allowed).
+ * If the terminal is not yet connected, the pending path is accepted and is
+ * associated with the appropriate struct hvc_iucv_private instance.
+ *
+ * Returns 0 if @path belongs to a terminal managed by this device driver;
+ * otherwise returns -ENODEV in order to dispatch this path to other handlers.
+ *
+ * Locking:	struct hvc_iucv_private->lock
+ */
+static	int hvc_iucv_path_pending(struct iucv_path *path,
+				  u8 ipvmid[8], u8 ipuser[16])
+{
+	struct hvc_iucv_private *priv, *tmp;
+	u8 wildcard[9] = "lnxhvc  ";
+	int i, rc, find_unused;
+	u8 nuser_data[16];
+	u8 vm_user_id[9];
+
+	ASCEBC(wildcard, sizeof(wildcard));
+	find_unused = !memcmp(wildcard, ipuser, 8);
+
+	/* First, check if the pending path request is managed by this
+	 * IUCV handler:
+	 * - find a disconnected device if ipuser contains the wildcard
+	 * - find the device that matches the terminal ID in ipuser
+	 */
+	priv = NULL;
+	for (i = 0; i < hvc_iucv_devices; i++) {
+		tmp = hvc_iucv_table[i];
+		if (!tmp)
+			continue;
+
+		if (find_unused) {
+			spin_lock(&tmp->lock);
+			if (tmp->iucv_state == IUCV_DISCONN)
+				priv = tmp;
+			spin_unlock(&tmp->lock);
+
+		} else if (!memcmp(tmp->srv_name, ipuser, 8))
+				priv = tmp;
+		if (priv)
+			break;
+	}
+	if (!priv)
+		return -ENODEV;
+
+	/* Enforce that ipvmid is allowed to connect to us */
+	read_lock(&hvc_iucv_filter_lock);
+	rc = hvc_iucv_filter_connreq(ipvmid);
+	read_unlock(&hvc_iucv_filter_lock);
+	if (rc) {
+		iucv_path_sever(path, ipuser);
+		iucv_path_free(path);
+		memcpy(vm_user_id, ipvmid, 8);
+		vm_user_id[8] = 0;
+		pr_info("A connection request from z/VM user ID %s "
+			"was refused\n", vm_user_id);
+		return 0;
+	}
+
+	spin_lock(&priv->lock);
+
+	/* If the terminal is already connected or being severed, then sever
+	 * this path to enforce that there is only ONE established communication
+	 * path per terminal. */
+	if (priv->iucv_state != IUCV_DISCONN) {
+		iucv_path_sever(path, ipuser);
+		iucv_path_free(path);
+		goto out_path_handled;
+	}
+
+	/* accept path */
+	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
+	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
+	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
+	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
+	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
+	if (rc) {
+		iucv_path_sever(path, ipuser);
+		iucv_path_free(path);
+		goto out_path_handled;
+	}
+	priv->path = path;
+	priv->iucv_state = IUCV_CONNECTED;
+
+	/* store path information */
+	memcpy(priv->info_path, ipvmid, 8);
+	memcpy(priv->info_path + 8, ipuser + 8, 8);
+
+	/* flush buffered output data... */
+	schedule_delayed_work(&priv->sndbuf_work, 5);
+
+out_path_handled:
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+/**
+ * hvc_iucv_path_severed() - IUCV handler to process a path sever.
+ * @path:	Pending path (struct iucv_path)
+ * @ipuser:	User specified data for this path
+ *		(AF_IUCV: port/service name and originator port)
+ *
+ * This function calls hvc_iucv_hangup() for the
+ * respective IUCV HVC terminal.
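+ * The private data of @path points to the hvc_iucv_private instance that was
+ * attached when the path was accepted in hvc_iucv_path_pending().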
+ *
+ * Locking:	struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+	struct hvc_iucv_private *priv = path->private;
+
+	hvc_iucv_hangup(priv);
+}
+
+/**
+ * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
+ * @path:	Pending path (struct iucv_path)
+ * @msg:	Pointer to the IUCV message
+ *
+ * The function puts an incoming message on the input queue for later
+ * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
+ * If the tty has not yet been opened, the message is rejected.
+ *
+ * Locking:	struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_pending(struct iucv_path *path,
+				 struct iucv_message *msg)
+{
+	struct hvc_iucv_private *priv = path->private;
+	struct iucv_tty_buffer *rb;
+
+	/* reject messages that exceed max size of iucv_tty_msg->datalen */
+	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
+		iucv_message_reject(path, msg);
+		return;
+	}
+
+	spin_lock(&priv->lock);
+
+	/* reject messages if tty has not yet been opened */
+	if (priv->tty_state == TTY_CLOSED) {
+		iucv_message_reject(path, msg);
+		goto unlock_return;
+	}
+
+	/* allocate tty buffer to save iucv msg only */
+	rb = alloc_tty_buffer(0, GFP_ATOMIC);
+	if (!rb) {
+		iucv_message_reject(path, msg);
+		goto unlock_return;	/* -ENOMEM */
+	}
+	rb->msg = *msg;
+
+	list_add_tail(&rb->list, &priv->tty_inqueue);
+
+	hvc_kick();	/* wake up hvc thread */
+
+unlock_return:
+	spin_unlock(&priv->lock);
+}
+
+/**
+ * hvc_iucv_msg_complete() - IUCV handler to process message completion
+ * @path:	Pending path (struct iucv_path)
+ * @msg:	Pointer to the IUCV message
+ *
+ * The function is called upon completion of message delivery to remove the
+ * message from the outqueue. Additional delivery information can be found
+ * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
+ *	       purged messages	 (0x010000 (IPADPGNR)).
+ *
+ * Locking:	struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_complete(struct iucv_path *path,
+				  struct iucv_message *msg)
+{
+	struct hvc_iucv_private *priv = path->private;
+	struct iucv_tty_buffer	*ent, *next;
+	LIST_HEAD(list_remove);
+
+	spin_lock(&priv->lock);
+	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
+		if (ent->msg.id == msg->id) {
+			list_move(&ent->list, &list_remove);
+			break;
+		}
+	wake_up(&priv->sndbuf_waitq);
+	spin_unlock(&priv->lock);
+	destroy_tty_buffer_list(&list_remove);
+}
+
+/**
+ * hvc_iucv_pm_freeze() - Freeze PM callback
+ * @dev:	IUCV HVC terminal device
+ *
+ * Sever an established IUCV communication path and
+ * trigger a hang-up of the underlying HVC terminal.
+ */
+static int hvc_iucv_pm_freeze(struct device *dev)
+{
+	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+
+	local_bh_disable();
+	hvc_iucv_hangup(priv);
+	local_bh_enable();
+
+	return 0;
+}
+
+/**
+ * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev:	IUCV HVC terminal device
+ *
+ * Wake up the HVC thread to trigger hang-up and respective
+ * HVC back-end notifier invocations.
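+ * (Both the .thaw and .restore callbacks of hvc_iucv_pm_ops map to this
+ * function; see the dev_pm_ops definition below.)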
+ */ +static int hvc_iucv_pm_restore_thaw(struct device *dev) +{ +	hvc_kick(); +	return 0; +} + +static ssize_t hvc_iucv_dev_termid_show(struct device *dev, +					struct device_attribute *attr, +					char *buf) +{ +	struct hvc_iucv_private *priv = dev_get_drvdata(dev); +	size_t len; + +	len = sizeof(priv->srv_name); +	memcpy(buf, priv->srv_name, len); +	EBCASC(buf, len); +	buf[len++] = '\n'; +	return len; +} + +static ssize_t hvc_iucv_dev_state_show(struct device *dev, +					struct device_attribute *attr, +					char *buf) +{ +	struct hvc_iucv_private *priv = dev_get_drvdata(dev); +	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state); +} + +static ssize_t hvc_iucv_dev_peer_show(struct device *dev, +				      struct device_attribute *attr, +				      char *buf) +{ +	struct hvc_iucv_private *priv = dev_get_drvdata(dev); +	char vmid[9], ipuser[9]; + +	memset(vmid, 0, sizeof(vmid)); +	memset(ipuser, 0, sizeof(ipuser)); + +	spin_lock_bh(&priv->lock); +	if (priv->iucv_state == IUCV_CONNECTED) { +		memcpy(vmid, priv->info_path, 8); +		memcpy(ipuser, priv->info_path + 8, 8); +	} +	spin_unlock_bh(&priv->lock); +	EBCASC(ipuser, 8); + +	return sprintf(buf, "%s:%s\n", vmid, ipuser); +} + + +/* HVC operations */ +static const struct hv_ops hvc_iucv_ops = { +	.get_chars = hvc_iucv_get_chars, +	.put_chars = hvc_iucv_put_chars, +	.notifier_add = hvc_iucv_notifier_add, +	.notifier_del = hvc_iucv_notifier_del, +	.notifier_hangup = hvc_iucv_notifier_hangup, +	.dtr_rts = hvc_iucv_dtr_rts, +}; + +/* Suspend / resume device operations */ +static const struct dev_pm_ops hvc_iucv_pm_ops = { +	.freeze	  = hvc_iucv_pm_freeze, +	.thaw	  = hvc_iucv_pm_restore_thaw, +	.restore  = hvc_iucv_pm_restore_thaw, +}; + +/* IUCV HVC device driver */ +static struct device_driver hvc_iucv_driver = { +	.name = KMSG_COMPONENT, +	.bus  = &iucv_bus, +	.pm   = &hvc_iucv_pm_ops, +}; + +/* IUCV HVC device attributes */ +static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL); +static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL); +static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL); +static struct attribute *hvc_iucv_dev_attrs[] = { +	&dev_attr_termid.attr, +	&dev_attr_state.attr, +	&dev_attr_peer.attr, +	NULL, +}; +static struct attribute_group hvc_iucv_dev_attr_group = { +	.attrs = hvc_iucv_dev_attrs, +}; +static const struct attribute_group *hvc_iucv_dev_attr_groups[] = { +	&hvc_iucv_dev_attr_group, +	NULL, +}; + + +/** + * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance + * @id:			hvc_iucv_table index + * @is_console:		Flag if the instance is used as Linux console + * + * This function allocates a new hvc_iucv_private structure and stores + * the instance in hvc_iucv_table at index @id. + * Returns 0 on success; otherwise non-zero. 
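+ * (The terminal is registered with HVC vterm number HVC_IUCV_MAGIC + @id and
+ * an IUCV service name of the form "lnxhvc<id>", as set up in the body below.)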
+ */ +static int __init hvc_iucv_alloc(int id, unsigned int is_console) +{ +	struct hvc_iucv_private *priv; +	char name[9]; +	int rc; + +	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; + +	spin_lock_init(&priv->lock); +	INIT_LIST_HEAD(&priv->tty_outqueue); +	INIT_LIST_HEAD(&priv->tty_inqueue); +	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work); +	init_waitqueue_head(&priv->sndbuf_waitq); + +	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL); +	if (!priv->sndbuf) { +		kfree(priv); +		return -ENOMEM; +	} + +	/* set console flag */ +	priv->is_console = is_console; + +	/* allocate hvc device */ +	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */ +			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); +	if (IS_ERR(priv->hvc)) { +		rc = PTR_ERR(priv->hvc); +		goto out_error_hvc; +	} + +	/* notify HVC thread instead of using polling */ +	priv->hvc->irq_requested = 1; + +	/* setup iucv related information */ +	snprintf(name, 9, "lnxhvc%-2d", id); +	memcpy(priv->srv_name, name, 8); +	ASCEBC(priv->srv_name, 8); + +	/* create and setup device */ +	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL); +	if (!priv->dev) { +		rc = -ENOMEM; +		goto out_error_dev; +	} +	dev_set_name(priv->dev, "hvc_iucv%d", id); +	dev_set_drvdata(priv->dev, priv); +	priv->dev->bus = &iucv_bus; +	priv->dev->parent = iucv_root; +	priv->dev->driver = &hvc_iucv_driver; +	priv->dev->groups = hvc_iucv_dev_attr_groups; +	priv->dev->release = (void (*)(struct device *)) kfree; +	rc = device_register(priv->dev); +	if (rc) { +		put_device(priv->dev); +		goto out_error_dev; +	} + +	hvc_iucv_table[id] = priv; +	return 0; + +out_error_dev: +	hvc_remove(priv->hvc); +out_error_hvc: +	free_page((unsigned long) priv->sndbuf); +	kfree(priv); + +	return rc; +} + +/** + * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances + */ +static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv) +{ +	hvc_remove(priv->hvc); +	device_unregister(priv->dev); +	free_page((unsigned long) priv->sndbuf); +	kfree(priv); +} + +/** + * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID + * @filter:	String containing a comma-separated list of z/VM user IDs + */ +static const char *hvc_iucv_parse_filter(const char *filter, char *dest) +{ +	const char *nextdelim, *residual; +	size_t len; + +	nextdelim = strchr(filter, ','); +	if (nextdelim) { +		len = nextdelim - filter; +		residual = nextdelim + 1; +	} else { +		len = strlen(filter); +		residual = filter + len; +	} + +	if (len == 0) +		return ERR_PTR(-EINVAL); + +	/* check for '\n' (if called from sysfs) */ +	if (filter[len - 1] == '\n') +		len--; + +	if (len > 8) +		return ERR_PTR(-EINVAL); + +	/* pad with blanks and save upper case version of user ID */ +	memset(dest, ' ', 8); +	while (len--) +		dest[len] = toupper(filter[len]); +	return residual; +} + +/** + * hvc_iucv_setup_filter() - Set up z/VM user ID filter + * @filter:	String consisting of a comma-separated list of z/VM user IDs + * + * The function parses the @filter string and creates an array containing + * the list of z/VM user ID filter entries. + * Return code 0 means success, -EINVAL if the filter is syntactically + * incorrect, -ENOMEM if there was not enough memory to allocate the + * filter list array, or -ENOSPC if too many z/VM user IDs have been specified. 
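+ *
+ * For example, a filter string of "LNXTERM1,LNXTERM2" (illustrative z/VM user
+ * IDs) restricts incoming connections to those two guests, while an empty
+ * string clears the filter again.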
+ */ +static int hvc_iucv_setup_filter(const char *val) +{ +	const char *residual; +	int err; +	size_t size, count; +	void *array, *old_filter; + +	count = strlen(val); +	if (count == 0 || (count == 1 && val[0] == '\n')) { +		size  = 0; +		array = NULL; +		goto out_replace_filter;	/* clear filter */ +	} + +	/* count user IDs in order to allocate sufficient memory */ +	size = 1; +	residual = val; +	while ((residual = strchr(residual, ',')) != NULL) { +		residual++; +		size++; +	} + +	/* check if the specified list exceeds the filter limit */ +	if (size > MAX_VMID_FILTER) +		return -ENOSPC; + +	array = kzalloc(size * 8, GFP_KERNEL); +	if (!array) +		return -ENOMEM; + +	count = size; +	residual = val; +	while (*residual && count) { +		residual = hvc_iucv_parse_filter(residual, +						 array + ((size - count) * 8)); +		if (IS_ERR(residual)) { +			err = PTR_ERR(residual); +			kfree(array); +			goto out_err; +		} +		count--; +	} + +out_replace_filter: +	write_lock_bh(&hvc_iucv_filter_lock); +	old_filter = hvc_iucv_filter; +	hvc_iucv_filter_size = size; +	hvc_iucv_filter = array; +	write_unlock_bh(&hvc_iucv_filter_lock); +	kfree(old_filter); + +	err = 0; +out_err: +	return err; +} + +/** + * param_set_vmidfilter() - Set z/VM user ID filter parameter + * @val:	String consisting of a comma-separated list of z/VM user IDs + * @kp:		Kernel parameter pointing to hvc_iucv_filter array + * + * The function sets up the z/VM user ID filter specified as comma-separated + * list of user IDs in @val. + * Note: If it is called early in the boot process, @val is stored and + *	 parsed later in hvc_iucv_init(). + */ +static int param_set_vmidfilter(const char *val, const struct kernel_param *kp) +{ +	int rc; + +	if (!MACHINE_IS_VM || !hvc_iucv_devices) +		return -ENODEV; + +	if (!val) +		return -EINVAL; + +	rc = 0; +	if (slab_is_available()) +		rc = hvc_iucv_setup_filter(val); +	else +		hvc_iucv_filter_string = val;	/* defer... */ +	return rc; +} + +/** + * param_get_vmidfilter() - Get z/VM user ID filter + * @buffer:	Buffer to store z/VM user ID filter, + *		(buffer size assumption PAGE_SIZE) + * @kp:		Kernel parameter pointing to the hvc_iucv_filter array + * + * The function stores the filter as a comma-separated list of z/VM user IDs + * in @buffer. Typically, sysfs routines call this function for attr show. + */ +static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp) +{ +	int rc; +	size_t index, len; +	void *start, *end; + +	if (!MACHINE_IS_VM || !hvc_iucv_devices) +		return -ENODEV; + +	rc = 0; +	read_lock_bh(&hvc_iucv_filter_lock); +	for (index = 0; index < hvc_iucv_filter_size; index++) { +		start = hvc_iucv_filter + (8 * index); +		end   = memchr(start, ' ', 8); +		len   = (end) ? 
end - start : 8; +		memcpy(buffer + rc, start, len); +		rc += len; +		buffer[rc++] = ','; +	} +	read_unlock_bh(&hvc_iucv_filter_lock); +	if (rc) +		buffer[--rc] = '\0';	/* replace last comma and update rc */ +	return rc; +} + +#define param_check_vmidfilter(name, p) __param_check(name, p, void) + +static struct kernel_param_ops param_ops_vmidfilter = { +	.set = param_set_vmidfilter, +	.get = param_get_vmidfilter, +}; + +/** + * hvc_iucv_init() - z/VM IUCV HVC device driver initialization + */ +static int __init hvc_iucv_init(void) +{ +	int rc; +	unsigned int i; + +	if (!hvc_iucv_devices) +		return -ENODEV; + +	if (!MACHINE_IS_VM) { +		pr_notice("The z/VM IUCV HVC device driver cannot " +			   "be used without z/VM\n"); +		rc = -ENODEV; +		goto out_error; +	} + +	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) { +		pr_err("%lu is not a valid value for the hvc_iucv= " +			"kernel parameter\n", hvc_iucv_devices); +		rc = -EINVAL; +		goto out_error; +	} + +	/* register IUCV HVC device driver */ +	rc = driver_register(&hvc_iucv_driver); +	if (rc) +		goto out_error; + +	/* parse hvc_iucv_allow string and create z/VM user ID filter list */ +	if (hvc_iucv_filter_string) { +		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string); +		switch (rc) { +		case 0: +			break; +		case -ENOMEM: +			pr_err("Allocating memory failed with " +				"reason code=%d\n", 3); +			goto out_error; +		case -EINVAL: +			pr_err("hvc_iucv_allow= does not specify a valid " +				"z/VM user ID list\n"); +			goto out_error; +		case -ENOSPC: +			pr_err("hvc_iucv_allow= specifies too many " +				"z/VM user IDs\n"); +			goto out_error; +		default: +			goto out_error; +		} +	} + +	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, +					   sizeof(struct iucv_tty_buffer), +					   0, 0, NULL); +	if (!hvc_iucv_buffer_cache) { +		pr_err("Allocating memory failed with reason code=%d\n", 1); +		rc = -ENOMEM; +		goto out_error; +	} + +	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, +						    hvc_iucv_buffer_cache); +	if (!hvc_iucv_mempool) { +		pr_err("Allocating memory failed with reason code=%d\n", 2); +		kmem_cache_destroy(hvc_iucv_buffer_cache); +		rc = -ENOMEM; +		goto out_error; +	} + +	/* register the first terminal device as console +	 * (must be done before allocating hvc terminal devices) */ +	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops); +	if (rc) { +		pr_err("Registering HVC terminal device as " +		       "Linux console failed\n"); +		goto out_error_memory; +	} + +	/* allocate hvc_iucv_private structs */ +	for (i = 0; i < hvc_iucv_devices; i++) { +		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 
1 : 0); +		if (rc) { +			pr_err("Creating a new HVC terminal device " +				"failed with error code=%d\n", rc); +			goto out_error_hvc; +		} +	} + +	/* register IUCV callback handler */ +	rc = iucv_register(&hvc_iucv_handler, 0); +	if (rc) { +		pr_err("Registering IUCV handlers failed with error code=%d\n", +			rc); +		goto out_error_hvc; +	} + +	return 0; + +out_error_hvc: +	for (i = 0; i < hvc_iucv_devices; i++) +		if (hvc_iucv_table[i]) +			hvc_iucv_destroy(hvc_iucv_table[i]); +out_error_memory: +	mempool_destroy(hvc_iucv_mempool); +	kmem_cache_destroy(hvc_iucv_buffer_cache); +out_error: +	kfree(hvc_iucv_filter); +	hvc_iucv_devices = 0; /* ensure that we do not provide any device */ +	return rc; +} + +/** + * hvc_iucv_config() - Parsing of hvc_iucv=  kernel command line parameter + * @val:	Parameter value (numeric) + */ +static	int __init hvc_iucv_config(char *val) +{ +	 return kstrtoul(val, 10, &hvc_iucv_devices); +} + + +device_initcall(hvc_iucv_init); +__setup("hvc_iucv=", hvc_iucv_config); +core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640); diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c new file mode 100644 index 00000000000..a585079b4b3 --- /dev/null +++ b/drivers/tty/hvc/hvc_opal.c @@ -0,0 +1,439 @@ +/* + * opal driver interface to hvc_console.c + * + * Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + * + */ + +#undef DEBUG + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/console.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/export.h> + +#include <asm/hvconsole.h> +#include <asm/prom.h> +#include <asm/firmware.h> +#include <asm/hvsi.h> +#include <asm/udbg.h> +#include <asm/opal.h> + +#include "hvc_console.h" + +static const char hvc_opal_name[] = "hvc_opal"; + +static struct of_device_id hvc_opal_match[] = { +	{ .name = "serial", .compatible = "ibm,opal-console-raw" }, +	{ .name = "serial", .compatible = "ibm,opal-console-hvsi" }, +	{ }, +}; + +typedef enum hv_protocol { +	HV_PROTOCOL_RAW, +	HV_PROTOCOL_HVSI +} hv_protocol_t; + +struct hvc_opal_priv { +	hv_protocol_t		proto;	/* Raw data or HVSI packets */ +	struct hvsi_priv	hvsi;	/* HVSI specific data */ +}; +static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES]; + +/* For early boot console */ +static struct hvc_opal_priv hvc_opal_boot_priv; +static u32 hvc_opal_boot_termno; +static bool hvc_opal_event_registered; + +static const struct hv_ops hvc_opal_raw_ops = { +	.get_chars = opal_get_chars, +	.put_chars = opal_put_chars, +	.notifier_add = notifier_add_irq, +	.notifier_del = notifier_del_irq, +	.notifier_hangup = notifier_hangup_irq, +}; + +static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[vtermno]; + +	if (WARN_ON(!pv)) +		return -ENODEV; + +	return hvsilib_get_chars(&pv->hvsi, buf, count); +} + +static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[vtermno]; + +	if (WARN_ON(!pv)) +		return -ENODEV; + +	return hvsilib_put_chars(&pv->hvsi, buf, count); +} + +static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; +	int rc; + +	pr_devel("HVSI@%x: do open !\n", hp->vtermno); + +	rc = notifier_add_irq(hp, data); +	if (rc) +		return rc; + +	return hvsilib_open(&pv->hvsi, hp); +} + +static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: do close !\n", hp->vtermno); + +	hvsilib_close(&pv->hvsi, hp); + +	notifier_del_irq(hp, data); +} + +void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: do hangup !\n", hp->vtermno); + +	hvsilib_close(&pv->hvsi, hp); + +	notifier_hangup_irq(hp, data); +} + +static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + +	if (!pv) +		return -EINVAL; +	return pv->hvsi.mctrl; +} + +static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set, +				unsigned int clear) +{ +	struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n", +		 hp->vtermno, set, clear); + +	if (set & TIOCM_DTR) +		hvsilib_write_mctrl(&pv->hvsi, 1); +	else if (clear & TIOCM_DTR) +		hvsilib_write_mctrl(&pv->hvsi, 0); + +	return 0; +} + +static const struct hv_ops hvc_opal_hvsi_ops = { +	.get_chars = hvc_opal_hvsi_get_chars, +	.put_chars = hvc_opal_hvsi_put_chars, +	.notifier_add = 
hvc_opal_hvsi_open, +	.notifier_del = hvc_opal_hvsi_close, +	.notifier_hangup = hvc_opal_hvsi_hangup, +	.tiocmget = hvc_opal_hvsi_tiocmget, +	.tiocmset = hvc_opal_hvsi_tiocmset, +}; + +static int hvc_opal_console_event(struct notifier_block *nb, +				  unsigned long events, void *change) +{ +	if (events & OPAL_EVENT_CONSOLE_INPUT) +		hvc_kick(); +	return 0; +} + +static struct notifier_block hvc_opal_console_nb = { +	.notifier_call	= hvc_opal_console_event, +}; + +static int hvc_opal_probe(struct platform_device *dev) +{ +	const struct hv_ops *ops; +	struct hvc_struct *hp; +	struct hvc_opal_priv *pv; +	hv_protocol_t proto; +	unsigned int termno, boot = 0; +	const __be32 *reg; + + +	if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) { +		proto = HV_PROTOCOL_RAW; +		ops = &hvc_opal_raw_ops; +	} else if (of_device_is_compatible(dev->dev.of_node, +					   "ibm,opal-console-hvsi")) { +		proto = HV_PROTOCOL_HVSI; +		ops = &hvc_opal_hvsi_ops; +	} else { +		pr_err("hvc_opal: Unknown protocol for %s\n", +		       dev->dev.of_node->full_name); +		return -ENXIO; +	} + +	reg = of_get_property(dev->dev.of_node, "reg", NULL); +	termno = reg ? be32_to_cpup(reg) : 0; + +	/* Is it our boot one ? */ +	if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) { +		pv = hvc_opal_privs[termno]; +		boot = 1; +	} else if (hvc_opal_privs[termno] == NULL) { +		pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL); +		if (!pv) +			return -ENOMEM; +		pv->proto = proto; +		hvc_opal_privs[termno] = pv; +		if (proto == HV_PROTOCOL_HVSI) +			hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars, +				     termno, 0); + +		/* Instanciate now to establish a mapping index==vtermno */ +		hvc_instantiate(termno, termno, ops); +	} else { +		pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n", +		       dev->dev.of_node->full_name, termno); +		return -ENXIO; +	} + +	pr_info("hvc%d: %s protocol on %s%s\n", termno, +		proto == HV_PROTOCOL_RAW ? "raw" : "hvsi", +		dev->dev.of_node->full_name, +		boot ? " (boot console)" : ""); + +	/* We don't do IRQ ... */ +	hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS); +	if (IS_ERR(hp)) +		return PTR_ERR(hp); +	dev_set_drvdata(&dev->dev, hp); + +	/* ...  
but we use OPAL event to kick the console */ +	if (!hvc_opal_event_registered) { +		opal_notifier_register(&hvc_opal_console_nb); +		hvc_opal_event_registered = true; +	} + +	return 0; +} + +static int hvc_opal_remove(struct platform_device *dev) +{ +	struct hvc_struct *hp = dev_get_drvdata(&dev->dev); +	int rc, termno; + +	termno = hp->vtermno; +	rc = hvc_remove(hp); +	if (rc == 0) { +		if (hvc_opal_privs[termno] != &hvc_opal_boot_priv) +			kfree(hvc_opal_privs[termno]); +		hvc_opal_privs[termno] = NULL; +	} +	return rc; +} + +static struct platform_driver hvc_opal_driver = { +	.probe		= hvc_opal_probe, +	.remove		= hvc_opal_remove, +	.driver		= { +		.name	= hvc_opal_name, +		.owner	= THIS_MODULE, +		.of_match_table	= hvc_opal_match, +	} +}; + +static int __init hvc_opal_init(void) +{ +	if (!firmware_has_feature(FW_FEATURE_OPAL)) +		return -ENODEV; + +	/* Register as a vio device to receive callbacks */ +	return platform_driver_register(&hvc_opal_driver); +} +device_initcall(hvc_opal_init); + +static void udbg_opal_putc(char c) +{ +	unsigned int termno = hvc_opal_boot_termno; +	int count = -1; + +	if (c == '\n') +		udbg_opal_putc('\r'); + +	do { +		switch(hvc_opal_boot_priv.proto) { +		case HV_PROTOCOL_RAW: +			count = opal_put_chars(termno, &c, 1); +			break; +		case HV_PROTOCOL_HVSI: +			count = hvc_opal_hvsi_put_chars(termno, &c, 1); +			break; +		} +	} while(count == 0 || count == -EAGAIN); +} + +static int udbg_opal_getc_poll(void) +{ +	unsigned int termno = hvc_opal_boot_termno; +	int rc = 0; +	char c; + +	switch(hvc_opal_boot_priv.proto) { +	case HV_PROTOCOL_RAW: +		rc = opal_get_chars(termno, &c, 1); +		break; +	case HV_PROTOCOL_HVSI: +		rc = hvc_opal_hvsi_get_chars(termno, &c, 1); +		break; +	} +	if (!rc) +		return -1; +	return c; +} + +static int udbg_opal_getc(void) +{ +	int ch; +	for (;;) { +		ch = udbg_opal_getc_poll(); +		if (ch == -1) { +			/* This shouldn't be needed...but... */ +			volatile unsigned long delay; +			for (delay=0; delay < 2000000; delay++) +				; +		} else { +			return ch; +		} +	} +} + +static void udbg_init_opal_common(void) +{ +	udbg_putc = udbg_opal_putc; +	udbg_getc = udbg_opal_getc; +	udbg_getc_poll = udbg_opal_getc_poll; +	tb_ticks_per_usec = 0x200; /* Make udelay not suck */ +} + +void __init hvc_opal_init_early(void) +{ +	struct device_node *stdout_node = NULL; +	const __be32 *termno; +	const char *name = NULL; +	const struct hv_ops *ops; +	u32 index; + +	/* find the boot console from /chosen/stdout */ +	if (of_chosen) +		name = of_get_property(of_chosen, "linux,stdout-path", NULL); +	if (name) { +		stdout_node = of_find_node_by_path(name); +		if (!stdout_node) { +			pr_err("hvc_opal: Failed to locate default console!\n"); +			return; +		} +	} else { +		struct device_node *opal, *np; + +		/* Current OPAL takeover doesn't provide the stdout +		 * path, so we hard wire it +		 */ +		opal = of_find_node_by_path("/ibm,opal/consoles"); +		if (opal) +			pr_devel("hvc_opal: Found consoles in new location\n"); +		if (!opal) { +			opal = of_find_node_by_path("/ibm,opal"); +			if (opal) +				pr_devel("hvc_opal: " +					 "Found consoles in old location\n"); +		} +		if (!opal) +			return; +		for_each_child_of_node(opal, np) { +			if (!strcmp(np->name, "serial")) { +				stdout_node = np; +				break; +			} +		} +		of_node_put(opal); +	} +	if (!stdout_node) +		return; +	termno = of_get_property(stdout_node, "reg", NULL); +	index = termno ? 
be32_to_cpup(termno) : 0; +	if (index >= MAX_NR_HVC_CONSOLES) +		return; +	hvc_opal_privs[index] = &hvc_opal_boot_priv; + +	/* Check the protocol */ +	if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) { +		hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW; +		ops = &hvc_opal_raw_ops; +		pr_devel("hvc_opal: Found RAW console\n"); +	} +	else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) { +		hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI; +		ops = &hvc_opal_hvsi_ops; +		hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, +			     opal_put_chars, index, 1); +		/* HVSI, perform the handshake now */ +		hvsilib_establish(&hvc_opal_boot_priv.hvsi); +		pr_devel("hvc_opal: Found HVSI console\n"); +	} else +		goto out; +	hvc_opal_boot_termno = index; +	udbg_init_opal_common(); +	add_preferred_console("hvc", index, NULL); +	hvc_instantiate(index, index, ops); +out: +	of_node_put(stdout_node); +} + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW +void __init udbg_init_debug_opal_raw(void) +{ +	u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO; +	hvc_opal_privs[index] = &hvc_opal_boot_priv; +	hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW; +	hvc_opal_boot_termno = index; +	udbg_init_opal_common(); +} +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI +void __init udbg_init_debug_opal_hvsi(void) +{ +	u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO; +	hvc_opal_privs[index] = &hvc_opal_boot_priv; +	hvc_opal_boot_termno = index; +	udbg_init_opal_common(); +	hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars, +		     index, 1); +	hvsilib_establish(&hvc_opal_boot_priv.hvsi); +} +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */ diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c new file mode 100644 index 00000000000..08c87920b74 --- /dev/null +++ b/drivers/tty/hvc/hvc_rtas.c @@ -0,0 +1,123 @@ +/* + * IBM RTAS driver interface to hvc_console.c + * + * (C) Copyright IBM Corporation 2001-2005 + * (C) Copyright Red Hat, Inc. 2005 + * + * Author(s): Maximino Augilar <IBM STI Design Center> + *	    : Ryan S. Arnold <rsa@us.ibm.com> + *	    : Utz Bacher <utz.bacher@de.ibm.com> + *	    : David Woodhouse <dwmw2@infradead.org> + * + *    inspired by drivers/char/hvc_console.c + *    written by Anton Blanchard and Paul Mackerras + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/types.h> + +#include <asm/irq.h> +#include <asm/rtas.h> +#include "hvc_console.h" + +#define hvc_rtas_cookie 0x67781e15 +struct hvc_struct *hvc_rtas_dev; + +static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE; +static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE; + +static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf, +		int count) +{ +	int i; + +	for (i = 0; i < count; i++) { +		if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i])) +			break; +	} + +	return i; +} + +static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count) +{ +	int i, c; + +	for (i = 0; i < count; i++) { +		if (rtas_call(rtascons_get_char_token, 0, 2, &c)) +			break; + +		buf[i] = c; +	} + +	return i; +} + +static const struct hv_ops hvc_rtas_get_put_ops = { +	.get_chars = hvc_rtas_read_console, +	.put_chars = hvc_rtas_write_console, +}; + +static int __init hvc_rtas_init(void) +{ +	struct hvc_struct *hp; + +	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE) +		rtascons_put_char_token = rtas_token("put-term-char"); +	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE) +		return -EIO; + +	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE) +		rtascons_get_char_token = rtas_token("get-term-char"); +	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE) +		return -EIO; + +	BUG_ON(hvc_rtas_dev); + +	/* Allocate an hvc_struct for the console device we instantiated +	 * earlier.  Save off hp so that we can return it on exit */ +	hp = hvc_alloc(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops, 16); +	if (IS_ERR(hp)) +		return PTR_ERR(hp); + +	hvc_rtas_dev = hp; + +	return 0; +} +device_initcall(hvc_rtas_init); + +/* This will happen prior to module init.  There is no tty at this time? */ +static int __init hvc_rtas_console_init(void) +{ +	rtascons_put_char_token = rtas_token("put-term-char"); +	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE) +		return -EIO; + +	rtascons_get_char_token = rtas_token("get-term-char"); +	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE) +		return -EIO; + +	hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops); +	add_preferred_console("hvc", 0, NULL); + +	return 0; +} +console_initcall(hvc_rtas_console_init); diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c new file mode 100644 index 00000000000..df374860037 --- /dev/null +++ b/drivers/tty/hvc/hvc_tile.c @@ -0,0 +1,205 @@ +/* + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. 
+ * + * Tilera TILE Processor hypervisor console + */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#include <asm/setup.h> +#include <arch/sim_def.h> + +#include <hv/hypervisor.h> + +#include "hvc_console.h" + +static int use_sim_console; +static int __init sim_console(char *str) +{ +	use_sim_console = 1; +	return 0; +} +early_param("sim_console", sim_console); + +int tile_console_write(const char *buf, int count) +{ +	if (unlikely(use_sim_console)) { +		int i; +		for (i = 0; i < count; ++i) +			__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC | +				     (buf[i] << _SIM_CONTROL_OPERATOR_BITS)); +		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC | +			     (SIM_PUTC_FLUSH_BINARY << +			      _SIM_CONTROL_OPERATOR_BITS)); +		return 0; +	} else { +		return hv_console_write((HV_VirtAddr)buf, count); +	} +} + +static int hvc_tile_put_chars(uint32_t vt, const char *buf, int count) +{ +	return tile_console_write(buf, count); +} + +static int hvc_tile_get_chars(uint32_t vt, char *buf, int count) +{ +	int i, c; + +	for (i = 0; i < count; ++i) { +		c = hv_console_read_if_ready(); +		if (c < 0) +			break; +		buf[i] = c; +	} + +	return i; +} + +#ifdef __tilegx__ +/* + * IRQ based callbacks. + */ +static int hvc_tile_notifier_add_irq(struct hvc_struct *hp, int irq) +{ +	int rc; +	int cpu = raw_smp_processor_id();  /* Choose an arbitrary cpu */ +	HV_Coord coord = { .x = cpu_x(cpu), .y = cpu_y(cpu) }; + +	rc = notifier_add_irq(hp, irq); +	if (rc) +		return rc; + +	/* +	 * Request that the hypervisor start sending us interrupts. +	 * If the hypervisor returns an error, we still return 0, so that +	 * we can fall back to polling. +	 */ +	if (hv_console_set_ipi(KERNEL_PL, irq, coord) < 0) +		notifier_del_irq(hp, irq); + +	return 0; +} + +static void hvc_tile_notifier_del_irq(struct hvc_struct *hp, int irq) +{ +	HV_Coord coord = { 0, 0 }; + +	/* Tell the hypervisor to stop sending us interrupts. */ +	hv_console_set_ipi(KERNEL_PL, -1, coord); + +	notifier_del_irq(hp, irq); +} + +static void hvc_tile_notifier_hangup_irq(struct hvc_struct *hp, int irq) +{ +	hvc_tile_notifier_del_irq(hp, irq); +} +#endif + +static const struct hv_ops hvc_tile_get_put_ops = { +	.get_chars = hvc_tile_get_chars, +	.put_chars = hvc_tile_put_chars, +#ifdef __tilegx__ +	.notifier_add = hvc_tile_notifier_add_irq, +	.notifier_del = hvc_tile_notifier_del_irq, +	.notifier_hangup = hvc_tile_notifier_hangup_irq, +#endif +}; + + +#ifdef __tilegx__ +static int hvc_tile_probe(struct platform_device *pdev) +{ +	struct hvc_struct *hp; +	int tile_hvc_irq; + +	/* Create our IRQ and register it. 
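+	 * irq_alloc_hwirq() is expected to return 0 here when no hardware IRQ
+	 * could be allocated, which the probe treats as a fatal error below.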
*/ +	tile_hvc_irq = irq_alloc_hwirq(-1); +	if (!tile_hvc_irq) +		return -ENXIO; + +	tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU); +	hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128); +	if (IS_ERR(hp)) { +		irq_free_hwirq(tile_hvc_irq); +		return PTR_ERR(hp); +	} +	dev_set_drvdata(&pdev->dev, hp); + +	return 0; +} + +static int hvc_tile_remove(struct platform_device *pdev) +{ +	int rc; +	struct hvc_struct *hp = dev_get_drvdata(&pdev->dev); + +	rc = hvc_remove(hp); +	if (rc == 0) +		irq_free_hwirq(hp->data); + +	return rc; +} + +static void hvc_tile_shutdown(struct platform_device *pdev) +{ +	struct hvc_struct *hp = dev_get_drvdata(&pdev->dev); + +	hvc_tile_notifier_del_irq(hp, hp->data); +} + +static struct platform_device hvc_tile_pdev = { +	.name           = "hvc-tile", +	.id             = 0, +}; + +static struct platform_driver hvc_tile_driver = { +	.probe          = hvc_tile_probe, +	.remove         = hvc_tile_remove, +	.shutdown	= hvc_tile_shutdown, +	.driver         = { +		.name   = "hvc-tile", +		.owner  = THIS_MODULE, +	} +}; +#endif + +static int __init hvc_tile_console_init(void) +{ +	hvc_instantiate(0, 0, &hvc_tile_get_put_ops); +	add_preferred_console("hvc", 0, NULL); +	return 0; +} +console_initcall(hvc_tile_console_init); + +static int __init hvc_tile_init(void) +{ +#ifndef __tilegx__ +	struct hvc_struct *hp; +	hp = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128); +	return PTR_ERR_OR_ZERO(hp); +#else +	platform_device_register(&hvc_tile_pdev); +	return platform_driver_register(&hvc_tile_driver); +#endif +} +device_initcall(hvc_tile_init); diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c new file mode 100644 index 00000000000..9cf573d06a2 --- /dev/null +++ b/drivers/tty/hvc/hvc_udbg.c @@ -0,0 +1,95 @@ +/* + * udbg interface to hvc_console.c + * + * (C) Copyright David Gibson, IBM Corporation 2008. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/irq.h> + +#include <asm/udbg.h> + +#include "hvc_console.h" + +struct hvc_struct *hvc_udbg_dev; + +static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count) +{ +	int i; + +	for (i = 0; i < count && udbg_putc; i++) +		udbg_putc(buf[i]); + +	return i; +} + +static int hvc_udbg_get(uint32_t vtermno, char *buf, int count) +{ +	int i, c; + +	if (!udbg_getc_poll) +		return 0; + +	for (i = 0; i < count; i++) { +		if ((c = udbg_getc_poll()) == -1) +			break; +		buf[i] = c; +	} + +	return i; +} + +static const struct hv_ops hvc_udbg_ops = { +	.get_chars = hvc_udbg_get, +	.put_chars = hvc_udbg_put, +}; + +static int __init hvc_udbg_init(void) +{ +	struct hvc_struct *hp; + +	if (!udbg_putc) +		return -ENODEV; + +	BUG_ON(hvc_udbg_dev); + +	hp = hvc_alloc(0, 0, &hvc_udbg_ops, 16); +	if (IS_ERR(hp)) +		return PTR_ERR(hp); + +	hvc_udbg_dev = hp; + +	return 0; +} +device_initcall(hvc_udbg_init); + +static int __init hvc_udbg_console_init(void) +{ +	if (!udbg_putc) +		return -ENODEV; + +	hvc_instantiate(0, 0, &hvc_udbg_ops); +	add_preferred_console("hvc", 0, NULL); + +	return 0; +} +console_initcall(hvc_udbg_console_init); diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c new file mode 100644 index 00000000000..b594abfbf21 --- /dev/null +++ b/drivers/tty/hvc/hvc_vio.c @@ -0,0 +1,499 @@ +/* + * vio driver interface to hvc_console.c + * + * This code was moved here to allow the remaining code to be reused as a + * generic polling mode with semi-reliable transport driver core to the + * console and tty subsystems. + * + * + * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM + * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM + * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. + * Copyright (C) 2004 IBM Corporation + * + * Additional Author(s): + *  Ryan S. Arnold <rsa@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + * + * TODO: + * + *   - handle error in sending hvsi protocol packets + *   - retry nego on subsequent sends ? 
+ */ + +#undef DEBUG + +#include <linux/types.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/console.h> +#include <linux/module.h> + +#include <asm/hvconsole.h> +#include <asm/vio.h> +#include <asm/prom.h> +#include <asm/hvsi.h> +#include <asm/udbg.h> +#include <asm/machdep.h> + +#include "hvc_console.h" + +static const char hvc_driver_name[] = "hvc_console"; + +static struct vio_device_id hvc_driver_table[] = { +	{"serial", "hvterm1"}, +#ifndef HVC_OLD_HVSI +	{"serial", "hvterm-protocol"}, +#endif +	{ "", "" } +}; +MODULE_DEVICE_TABLE(vio, hvc_driver_table); + +typedef enum hv_protocol { +	HV_PROTOCOL_RAW, +	HV_PROTOCOL_HVSI +} hv_protocol_t; + +struct hvterm_priv { +	u32			termno;	/* HV term number */ +	hv_protocol_t		proto;	/* Raw data or HVSI packets */ +	struct hvsi_priv	hvsi;	/* HVSI specific data */ +	spinlock_t		buf_lock; +	char			buf[SIZE_VIO_GET_CHARS]; +	int			left; +	int			offset; +}; +static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES]; +/* For early boot console */ +static struct hvterm_priv hvterm_priv0; + +static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count) +{ +	struct hvterm_priv *pv = hvterm_privs[vtermno]; +	unsigned long i; +	unsigned long flags; +	int got; + +	if (WARN_ON(!pv)) +		return 0; + +	spin_lock_irqsave(&pv->buf_lock, flags); + +	if (pv->left == 0) { +		pv->offset = 0; +		pv->left = hvc_get_chars(pv->termno, pv->buf, count); + +		/* +		 * Work around a HV bug where it gives us a null +		 * after every \r.  -- paulus +		 */ +		for (i = 1; i < pv->left; ++i) { +			if (pv->buf[i] == 0 && pv->buf[i-1] == '\r') { +				--pv->left; +				if (i < pv->left) { +					memmove(&pv->buf[i], &pv->buf[i+1], +						pv->left - i); +				} +			} +		} +	} + +	got = min(count, pv->left); +	memcpy(buf, &pv->buf[pv->offset], got); +	pv->offset += got; +	pv->left -= got; + +	spin_unlock_irqrestore(&pv->buf_lock, flags); + +	return got; +} + +static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count) +{ +	struct hvterm_priv *pv = hvterm_privs[vtermno]; + +	if (WARN_ON(!pv)) +		return 0; + +	return hvc_put_chars(pv->termno, buf, count); +} + +static const struct hv_ops hvterm_raw_ops = { +	.get_chars = hvterm_raw_get_chars, +	.put_chars = hvterm_raw_put_chars, +	.notifier_add = notifier_add_irq, +	.notifier_del = notifier_del_irq, +	.notifier_hangup = notifier_hangup_irq, +}; + +static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count) +{ +	struct hvterm_priv *pv = hvterm_privs[vtermno]; + +	if (WARN_ON(!pv)) +		return 0; + +	return hvsilib_get_chars(&pv->hvsi, buf, count); +} + +static int hvterm_hvsi_put_chars(uint32_t vtermno, const char *buf, int count) +{ +	struct hvterm_priv *pv = hvterm_privs[vtermno]; + +	if (WARN_ON(!pv)) +		return 0; + +	return hvsilib_put_chars(&pv->hvsi, buf, count); +} + +static int hvterm_hvsi_open(struct hvc_struct *hp, int data) +{ +	struct hvterm_priv *pv = hvterm_privs[hp->vtermno]; +	int rc; + +	pr_devel("HVSI@%x: open !\n", pv->termno); + +	rc = notifier_add_irq(hp, data); +	if (rc) +		return rc; + +	return hvsilib_open(&pv->hvsi, hp); +} + +static void hvterm_hvsi_close(struct hvc_struct *hp, int data) +{ +	struct hvterm_priv *pv = hvterm_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: do close !\n", pv->termno); + +	hvsilib_close(&pv->hvsi, hp); + +	notifier_del_irq(hp, data); +} + +void hvterm_hvsi_hangup(struct hvc_struct *hp, int data) +{ +	struct hvterm_priv *pv = hvterm_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: do hangup !\n", 
pv->termno); + +	hvsilib_close(&pv->hvsi, hp); + +	notifier_hangup_irq(hp, data); +} + +static int hvterm_hvsi_tiocmget(struct hvc_struct *hp) +{ +	struct hvterm_priv *pv = hvterm_privs[hp->vtermno]; + +	if (!pv) +		return -EINVAL; +	return pv->hvsi.mctrl; +} + +static int hvterm_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set, +				unsigned int clear) +{ +	struct hvterm_priv *pv = hvterm_privs[hp->vtermno]; + +	pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n", +		 pv->termno, set, clear); + +	if (set & TIOCM_DTR) +		hvsilib_write_mctrl(&pv->hvsi, 1); +	else if (clear & TIOCM_DTR) +		hvsilib_write_mctrl(&pv->hvsi, 0); + +	return 0; +} + +static const struct hv_ops hvterm_hvsi_ops = { +	.get_chars = hvterm_hvsi_get_chars, +	.put_chars = hvterm_hvsi_put_chars, +	.notifier_add = hvterm_hvsi_open, +	.notifier_del = hvterm_hvsi_close, +	.notifier_hangup = hvterm_hvsi_hangup, +	.tiocmget = hvterm_hvsi_tiocmget, +	.tiocmset = hvterm_hvsi_tiocmset, +}; + +static void udbg_hvc_putc(char c) +{ +	int count = -1; + +	if (!hvterm_privs[0]) +		return; + +	if (c == '\n') +		udbg_hvc_putc('\r'); + +	do { +		switch(hvterm_privs[0]->proto) { +		case HV_PROTOCOL_RAW: +			count = hvterm_raw_put_chars(0, &c, 1); +			break; +		case HV_PROTOCOL_HVSI: +			count = hvterm_hvsi_put_chars(0, &c, 1); +			break; +		} +	} while(count == 0); +} + +static int udbg_hvc_getc_poll(void) +{ +	int rc = 0; +	char c; + +	if (!hvterm_privs[0]) +		return -1; + +	switch(hvterm_privs[0]->proto) { +	case HV_PROTOCOL_RAW: +		rc = hvterm_raw_get_chars(0, &c, 1); +		break; +	case HV_PROTOCOL_HVSI: +		rc = hvterm_hvsi_get_chars(0, &c, 1); +		break; +	} +	if (!rc) +		return -1; +	return c; +} + +static int udbg_hvc_getc(void) +{ +	int ch; + +	if (!hvterm_privs[0]) +		return -1; + +	for (;;) { +		ch = udbg_hvc_getc_poll(); +		if (ch == -1) { +			/* This shouldn't be needed...but... */ +			volatile unsigned long delay; +			for (delay=0; delay < 2000000; delay++) +				; +		} else { +			return ch; +		} +	} +} + +static int hvc_vio_probe(struct vio_dev *vdev, +				   const struct vio_device_id *id) +{ +	const struct hv_ops *ops; +	struct hvc_struct *hp; +	struct hvterm_priv *pv; +	hv_protocol_t proto; +	int i, termno = -1; + +	/* probed with invalid parameters. */ +	if (!vdev || !id) +		return -EPERM; + +	if (of_device_is_compatible(vdev->dev.of_node, "hvterm1")) { +		proto = HV_PROTOCOL_RAW; +		ops = &hvterm_raw_ops; +	} else if (of_device_is_compatible(vdev->dev.of_node, "hvterm-protocol")) { +		proto = HV_PROTOCOL_HVSI; +		ops = &hvterm_hvsi_ops; +	} else { +		pr_err("hvc_vio: Unknown protocol for %s\n", vdev->dev.of_node->full_name); +		return -ENXIO; +	} + +	pr_devel("hvc_vio_probe() device %s, using %s protocol\n", +		 vdev->dev.of_node->full_name, +		 proto == HV_PROTOCOL_RAW ? "raw" : "hvsi"); + +	/* Is it our boot one ? 
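[Editor's note] udbg_hvc_putc() above has to cope with a transport that may refuse a character (the put path returning 0), so it spins until the byte is accepted, and it expands \n into \r\n for the terminal. A small standalone sketch of that retry-and-expand discipline; flaky_putc() is a hypothetical stand-in for the raw/HVSI put path and is not from the driver.

#include <stdio.h>

/* pretend the transport only accepts every other attempt */
static int flaky_putc(char c)
{
	static int busy;

	busy = !busy;
	if (busy)
		return 0;		/* nothing consumed, caller must retry */
	putchar(c);
	return 1;
}

static void polled_putc(char c)
{
	if (c == '\n')
		polled_putc('\r');	/* CR before LF, as udbg_hvc_putc() does */
	while (flaky_putc(c) == 0)
		;			/* busy-wait until the byte is taken */
}

int main(void)
{
	const char *s = "console up\n";

	while (*s)
		polled_putc(*s++);
	return 0;
}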
*/ +	if (hvterm_privs[0] == &hvterm_priv0 && +	    vdev->unit_address == hvterm_priv0.termno) { +		pv = hvterm_privs[0]; +		termno = 0; +		pr_devel("->boot console, using termno 0\n"); +	} +	/* nope, allocate a new one */ +	else { +		for (i = 0; i < MAX_NR_HVC_CONSOLES && termno < 0; i++) +			if (!hvterm_privs[i]) +				termno = i; +		pr_devel("->non-boot console, using termno %d\n", termno); +		if (termno < 0) +			return -ENODEV; +		pv = kzalloc(sizeof(struct hvterm_priv), GFP_KERNEL); +		if (!pv) +			return -ENOMEM; +		pv->termno = vdev->unit_address; +		pv->proto = proto; +		spin_lock_init(&pv->buf_lock); +		hvterm_privs[termno] = pv; +		hvsilib_init(&pv->hvsi, hvc_get_chars, hvc_put_chars, +			     pv->termno, 0); +	} + +	hp = hvc_alloc(termno, vdev->irq, ops, MAX_VIO_PUT_CHARS); +	if (IS_ERR(hp)) +		return PTR_ERR(hp); +	dev_set_drvdata(&vdev->dev, hp); + +	/* register udbg if it's not there already for console 0 */ +	if (hp->index == 0 && !udbg_putc) { +		udbg_putc = udbg_hvc_putc; +		udbg_getc = udbg_hvc_getc; +		udbg_getc_poll = udbg_hvc_getc_poll; +	} + +	return 0; +} + +static int hvc_vio_remove(struct vio_dev *vdev) +{ +	struct hvc_struct *hp = dev_get_drvdata(&vdev->dev); +	int rc, termno; + +	termno = hp->vtermno; +	rc = hvc_remove(hp); +	if (rc == 0) { +		if (hvterm_privs[termno] != &hvterm_priv0) +			kfree(hvterm_privs[termno]); +		hvterm_privs[termno] = NULL; +	} +	return rc; +} + +static struct vio_driver hvc_vio_driver = { +	.id_table	= hvc_driver_table, +	.probe		= hvc_vio_probe, +	.remove		= hvc_vio_remove, +	.name		= hvc_driver_name, +}; + +static int __init hvc_vio_init(void) +{ +	int rc; + +	/* Register as a vio device to receive callbacks */ +	rc = vio_register_driver(&hvc_vio_driver); + +	return rc; +} +module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */ + +static void __exit hvc_vio_exit(void) +{ +	vio_unregister_driver(&hvc_vio_driver); +} +module_exit(hvc_vio_exit); + +void __init hvc_vio_init_early(void) +{ +	struct device_node *stdout_node; +	const __be32 *termno; +	const char *name; +	const struct hv_ops *ops; + +	/* find the boot console from /chosen/stdout */ +	if (!of_chosen) +		return; +	name = of_get_property(of_chosen, "linux,stdout-path", NULL); +	if (name == NULL) +		return; +	stdout_node = of_find_node_by_path(name); +	if (!stdout_node) +		return; +	name = of_get_property(stdout_node, "name", NULL); +	if (!name) { +		printk(KERN_WARNING "stdout node missing 'name' property!\n"); +		goto out; +	} + +	/* Check if it's a virtual terminal */ +	if (strncmp(name, "vty", 3) != 0) +		goto out; +	termno = of_get_property(stdout_node, "reg", NULL); +	if (termno == NULL) +		goto out; +	hvterm_priv0.termno = of_read_number(termno, 1); +	spin_lock_init(&hvterm_priv0.buf_lock); +	hvterm_privs[0] = &hvterm_priv0; + +	/* Check the protocol */ +	if (of_device_is_compatible(stdout_node, "hvterm1")) { +		hvterm_priv0.proto = HV_PROTOCOL_RAW; +		ops = &hvterm_raw_ops; +	} +	else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) { +		hvterm_priv0.proto = HV_PROTOCOL_HVSI; +		ops = &hvterm_hvsi_ops; +		hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars, +			     hvterm_priv0.termno, 1); +		/* HVSI, perform the handshake now */ +		hvsilib_establish(&hvterm_priv0.hvsi); +	} else +		goto out; +	udbg_putc = udbg_hvc_putc; +	udbg_getc = udbg_hvc_getc; +	udbg_getc_poll = udbg_hvc_getc_poll; +#ifdef HVC_OLD_HVSI +	/* When using the old HVSI driver don't register the HVC +	 * backend for HVSI, only do udbg +	 */ +	if (hvterm_priv0.proto == 
HV_PROTOCOL_HVSI) +		goto out; +#endif +	/* Check whether the user has requested a different console. */ +	if (!strstr(cmd_line, "console=")) +		add_preferred_console("hvc", 0, NULL); +	hvc_instantiate(0, 0, ops); +out: +	of_node_put(stdout_node); +} + +/* call this from early_init() for a working debug console on + * vterm capable LPAR machines + */ +#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR +void __init udbg_init_debug_lpar(void) +{ +	hvterm_privs[0] = &hvterm_priv0; +	hvterm_priv0.termno = 0; +	hvterm_priv0.proto = HV_PROTOCOL_RAW; +	spin_lock_init(&hvterm_priv0.buf_lock); +	udbg_putc = udbg_hvc_putc; +	udbg_getc = udbg_hvc_getc; +	udbg_getc_poll = udbg_hvc_getc_poll; +} +#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI +void __init udbg_init_debug_lpar_hvsi(void) +{ +	hvterm_privs[0] = &hvterm_priv0; +	hvterm_priv0.termno = CONFIG_PPC_EARLY_DEBUG_HVSI_VTERMNO; +	hvterm_priv0.proto = HV_PROTOCOL_HVSI; +	spin_lock_init(&hvterm_priv0.buf_lock); +	udbg_putc = udbg_hvc_putc; +	udbg_getc = udbg_hvc_getc; +	udbg_getc_poll = udbg_hvc_getc_poll; +	hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars, +		     hvterm_priv0.termno, 1); +	hvsilib_establish(&hvterm_priv0.hvsi); +} +#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI */ diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c new file mode 100644 index 00000000000..2dc2831840c --- /dev/null +++ b/drivers/tty/hvc/hvc_xen.c @@ -0,0 +1,658 @@ +/* + * xen console driver interface to hvc_console.c + * + * (c) 2007 Gerd Hoffmann <kraxel@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/list.h> + +#include <asm/io.h> +#include <asm/xen/hypervisor.h> + +#include <xen/xen.h> +#include <xen/interface/xen.h> +#include <xen/hvm.h> +#include <xen/grant_table.h> +#include <xen/page.h> +#include <xen/events.h> +#include <xen/interface/io/console.h> +#include <xen/interface/sched.h> +#include <xen/hvc-console.h> +#include <xen/xenbus.h> + +#include "hvc_console.h" + +#define HVC_COOKIE   0x58656e /* "Xen" in hex */ + +struct xencons_info { +	struct list_head list; +	struct xenbus_device *xbdev; +	struct xencons_interface *intf; +	unsigned int evtchn; +	struct hvc_struct *hvc; +	int irq; +	int vtermno; +	grant_ref_t gntref; +}; + +static LIST_HEAD(xenconsoles); +static DEFINE_SPINLOCK(xencons_lock); + +/* ------------------------------------------------------------------ */ + +static struct xencons_info *vtermno_to_xencons(int vtermno) +{ +	struct xencons_info *entry, *n, *ret = NULL; + +	if (list_empty(&xenconsoles)) +			return NULL; + +	list_for_each_entry_safe(entry, n, &xenconsoles, list) { +		if (entry->vtermno == vtermno) { +			ret  = entry; +			break; +		} +	} + +	return ret; +} + +static inline int xenbus_devid_to_vtermno(int devid) +{ +	return devid + HVC_COOKIE; +} + +static inline void notify_daemon(struct xencons_info *cons) +{ +	/* Use evtchn: this is called early, before irq is set up. */ +	notify_remote_via_evtchn(cons->evtchn); +} + +static int __write_console(struct xencons_info *xencons, +		const char *data, int len) +{ +	XENCONS_RING_IDX cons, prod; +	struct xencons_interface *intf = xencons->intf; +	int sent = 0; + +	cons = intf->out_cons; +	prod = intf->out_prod; +	mb();			/* update queue values before going on */ +	BUG_ON((prod - cons) > sizeof(intf->out)); + +	while ((sent < len) && ((prod - cons) < sizeof(intf->out))) +		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++]; + +	wmb();			/* write ring before updating pointer */ +	intf->out_prod = prod; + +	if (sent) +		notify_daemon(xencons); +	return sent; +} + +static int domU_write_console(uint32_t vtermno, const char *data, int len) +{ +	int ret = len; +	struct xencons_info *cons = vtermno_to_xencons(vtermno); +	if (cons == NULL) +		return -EINVAL; + +	/* +	 * Make sure the whole buffer is emitted, polling if +	 * necessary.  We don't ever want to rely on the hvc daemon +	 * because the most interesting console output is when the +	 * kernel is crippled. 
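[Editor's note] __write_console() above is the producer half of a classic single-producer/single-consumer ring: out_cons and out_prod are free-running indices, MASK_XENCONS_IDX() reduces them modulo the power-of-two ring size, the space check is simply prod - cons against sizeof(intf->out), and mb()/wmb() order the index reads and the payload write. A minimal userspace model of that index arithmetic follows (single-threaded, so the barrier points are only marked in comments; the driver also snapshots the indices into locals, which is elided here).

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static char ring[RING_SIZE];
static unsigned int prod, cons;		/* free-running, never wrapped by hand */

static int ring_write(const char *data, int len)
{
	int sent = 0;

	/* mb() in the real driver: fetch the indices before testing for space */
	while (sent < len && (prod - cons) < RING_SIZE)
		ring[RING_MASK(prod++)] = data[sent++];

	/* wmb() in the real driver: payload visible before the new out_prod */
	return sent;
}

int main(void)
{
	int n = ring_write("0123456789", 10);

	printf("accepted %d of 10 bytes\n", n);	/* 8: the ring is full */
	return 0;
}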
+	 */ +	while (len) { +		int sent = __write_console(cons, data, len); +		 +		data += sent; +		len -= sent; + +		if (unlikely(len)) +			HYPERVISOR_sched_op(SCHEDOP_yield, NULL); +	} + +	return ret; +} + +static int domU_read_console(uint32_t vtermno, char *buf, int len) +{ +	struct xencons_interface *intf; +	XENCONS_RING_IDX cons, prod; +	int recv = 0; +	struct xencons_info *xencons = vtermno_to_xencons(vtermno); +	if (xencons == NULL) +		return -EINVAL; +	intf = xencons->intf; + +	cons = intf->in_cons; +	prod = intf->in_prod; +	mb();			/* get pointers before reading ring */ +	BUG_ON((prod - cons) > sizeof(intf->in)); + +	while (cons != prod && recv < len) +		buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)]; + +	mb();			/* read ring before consuming */ +	intf->in_cons = cons; + +	notify_daemon(xencons); +	return recv; +} + +static struct hv_ops domU_hvc_ops = { +	.get_chars = domU_read_console, +	.put_chars = domU_write_console, +	.notifier_add = notifier_add_irq, +	.notifier_del = notifier_del_irq, +	.notifier_hangup = notifier_hangup_irq, +}; + +static int dom0_read_console(uint32_t vtermno, char *buf, int len) +{ +	return HYPERVISOR_console_io(CONSOLEIO_read, len, buf); +} + +/* + * Either for a dom0 to write to the system console, or a domU with a + * debug version of Xen + */ +static int dom0_write_console(uint32_t vtermno, const char *str, int len) +{ +	int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str); +	if (rc < 0) +		return rc; + +	return len; +} + +static struct hv_ops dom0_hvc_ops = { +	.get_chars = dom0_read_console, +	.put_chars = dom0_write_console, +	.notifier_add = notifier_add_irq, +	.notifier_del = notifier_del_irq, +	.notifier_hangup = notifier_hangup_irq, +}; + +static int xen_hvm_console_init(void) +{ +	int r; +	uint64_t v = 0; +	unsigned long mfn; +	struct xencons_info *info; + +	if (!xen_hvm_domain()) +		return -ENODEV; + +	info = vtermno_to_xencons(HVC_COOKIE); +	if (!info) { +		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); +		if (!info) +			return -ENOMEM; +	} else if (info->intf != NULL) { +		/* already configured */ +		return 0; +	} +	/* +	 * If the toolstack (or the hypervisor) hasn't set these values, the +	 * default value is 0. Even though mfn = 0 and evtchn = 0 are +	 * theoretically correct values, in practice they never are and they +	 * mean that a legacy toolstack hasn't initialized the pv console correctly. 
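[Editor's note] domU_read_console() above is the matching consumer: it drains in_cons up to in_prod with the same masking trick, publishes the new in_cons only after the payload has been copied out, and then notifies the other end so it can refill the ring. The same shape as a standalone userspace sketch, with the ring pre-filled as if by the producer above.

#include <stdio.h>

#define RING_SIZE 8
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static char ring[RING_SIZE] = "abcdefgh";	/* pretend the producer filled it */
static unsigned int prod = 8, cons;

static int ring_read(char *buf, int len)
{
	int recv = 0;

	/* mb() in the real driver: get the pointers before reading the ring */
	while (cons != prod && recv < len)
		buf[recv++] = ring[RING_MASK(cons++)];

	/* mb() again: read the ring before publishing the new in_cons */
	return recv;
}

int main(void)
{
	char buf[16];
	int n = ring_read(buf, sizeof(buf));

	printf("%.*s (%d bytes)\n", n, buf, n);
	return 0;
}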
+	 */ +	r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); +	if (r < 0 || v == 0) +		goto err; +	info->evtchn = v; +	v = 0; +	r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); +	if (r < 0 || v == 0) +		goto err; +	mfn = v; +	info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE); +	if (info->intf == NULL) +		goto err; +	info->vtermno = HVC_COOKIE; + +	spin_lock(&xencons_lock); +	list_add_tail(&info->list, &xenconsoles); +	spin_unlock(&xencons_lock); + +	return 0; +err: +	kfree(info); +	return -ENODEV; +} + +static int xen_pv_console_init(void) +{ +	struct xencons_info *info; + +	if (!xen_pv_domain()) +		return -ENODEV; + +	if (!xen_start_info->console.domU.evtchn) +		return -ENODEV; + +	info = vtermno_to_xencons(HVC_COOKIE); +	if (!info) { +		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); +		if (!info) +			return -ENOMEM; +	} else if (info->intf != NULL) { +		/* already configured */ +		return 0; +	} +	info->evtchn = xen_start_info->console.domU.evtchn; +	info->intf = mfn_to_virt(xen_start_info->console.domU.mfn); +	info->vtermno = HVC_COOKIE; + +	spin_lock(&xencons_lock); +	list_add_tail(&info->list, &xenconsoles); +	spin_unlock(&xencons_lock); + +	return 0; +} + +static int xen_initial_domain_console_init(void) +{ +	struct xencons_info *info; + +	if (!xen_initial_domain()) +		return -ENODEV; + +	info = vtermno_to_xencons(HVC_COOKIE); +	if (!info) { +		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); +		if (!info) +			return -ENOMEM; +	} + +	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); +	info->vtermno = HVC_COOKIE; + +	spin_lock(&xencons_lock); +	list_add_tail(&info->list, &xenconsoles); +	spin_unlock(&xencons_lock); + +	return 0; +} + +void xen_console_resume(void) +{ +	struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); +	if (info != NULL && info->irq) +		rebind_evtchn_irq(info->evtchn, info->irq); +} + +static void xencons_disconnect_backend(struct xencons_info *info) +{ +	if (info->irq > 0) +		unbind_from_irqhandler(info->irq, NULL); +	info->irq = 0; +	if (info->evtchn > 0) +		xenbus_free_evtchn(info->xbdev, info->evtchn); +	info->evtchn = 0; +	if (info->gntref > 0) +		gnttab_free_grant_references(info->gntref); +	info->gntref = 0; +	if (info->hvc != NULL) +		hvc_remove(info->hvc); +	info->hvc = NULL; +} + +static void xencons_free(struct xencons_info *info) +{ +	free_page((unsigned long)info->intf); +	info->intf = NULL; +	info->vtermno = 0; +	kfree(info); +} + +static int xen_console_remove(struct xencons_info *info) +{ +	xencons_disconnect_backend(info); +	spin_lock(&xencons_lock); +	list_del(&info->list); +	spin_unlock(&xencons_lock); +	if (info->xbdev != NULL) +		xencons_free(info); +	else { +		if (xen_hvm_domain()) +			iounmap(info->intf); +		kfree(info); +	} +	return 0; +} + +#ifdef CONFIG_HVC_XEN_FRONTEND +static struct xenbus_driver xencons_driver; + +static int xencons_remove(struct xenbus_device *dev) +{ +	return xen_console_remove(dev_get_drvdata(&dev->dev)); +} + +static int xencons_connect_backend(struct xenbus_device *dev, +				  struct xencons_info *info) +{ +	int ret, evtchn, devid, ref, irq; +	struct xenbus_transaction xbt; +	grant_ref_t gref_head; +	unsigned long mfn; + +	ret = xenbus_alloc_evtchn(dev, &evtchn); +	if (ret) +		return ret; +	info->evtchn = evtchn; +	irq = bind_evtchn_to_irq(evtchn); +	if (irq < 0) +		return irq; +	info->irq = irq; +	devid = dev->nodename[strlen(dev->nodename) - 1] - '0'; +	info->hvc = hvc_alloc(xenbus_devid_to_vtermno(devid), +			irq, &domU_hvc_ops, 256); +	if (IS_ERR(info->hvc)) +		return 
PTR_ERR(info->hvc); +	if (xen_pv_domain()) +		mfn = virt_to_mfn(info->intf); +	else +		mfn = __pa(info->intf) >> PAGE_SHIFT; +	ret = gnttab_alloc_grant_references(1, &gref_head); +	if (ret < 0) +		return ret; +	info->gntref = gref_head; +	ref = gnttab_claim_grant_reference(&gref_head); +	if (ref < 0) +		return ref; +	gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, +			mfn, 0); + + again: +	ret = xenbus_transaction_start(&xbt); +	if (ret) { +		xenbus_dev_fatal(dev, ret, "starting transaction"); +		return ret; +	} +	ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", ref); +	if (ret) +		goto error_xenbus; +	ret = xenbus_printf(xbt, dev->nodename, "port", "%u", +			    evtchn); +	if (ret) +		goto error_xenbus; +	ret = xenbus_printf(xbt, dev->nodename, "type", "ioemu"); +	if (ret) +		goto error_xenbus; +	ret = xenbus_transaction_end(xbt, 0); +	if (ret) { +		if (ret == -EAGAIN) +			goto again; +		xenbus_dev_fatal(dev, ret, "completing transaction"); +		return ret; +	} + +	xenbus_switch_state(dev, XenbusStateInitialised); +	return 0; + + error_xenbus: +	xenbus_transaction_end(xbt, 1); +	xenbus_dev_fatal(dev, ret, "writing xenstore"); +	return ret; +} + +static int xencons_probe(struct xenbus_device *dev, +				  const struct xenbus_device_id *id) +{ +	int ret, devid; +	struct xencons_info *info; + +	devid = dev->nodename[strlen(dev->nodename) - 1] - '0'; +	if (devid == 0) +		return -ENODEV; + +	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); +	if (!info) +		return -ENOMEM; +	dev_set_drvdata(&dev->dev, info); +	info->xbdev = dev; +	info->vtermno = xenbus_devid_to_vtermno(devid); +	info->intf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); +	if (!info->intf) +		goto error_nomem; + +	ret = xencons_connect_backend(dev, info); +	if (ret < 0) +		goto error; +	spin_lock(&xencons_lock); +	list_add_tail(&info->list, &xenconsoles); +	spin_unlock(&xencons_lock); + +	return 0; + + error_nomem: +	ret = -ENOMEM; +	xenbus_dev_fatal(dev, ret, "allocating device memory"); + error: +	xencons_disconnect_backend(info); +	xencons_free(info); +	return ret; +} + +static int xencons_resume(struct xenbus_device *dev) +{ +	struct xencons_info *info = dev_get_drvdata(&dev->dev); + +	xencons_disconnect_backend(info); +	memset(info->intf, 0, PAGE_SIZE); +	return xencons_connect_backend(dev, info); +} + +static void xencons_backend_changed(struct xenbus_device *dev, +				   enum xenbus_state backend_state) +{ +	switch (backend_state) { +	case XenbusStateReconfiguring: +	case XenbusStateReconfigured: +	case XenbusStateInitialising: +	case XenbusStateInitialised: +	case XenbusStateUnknown: +		break; + +	case XenbusStateInitWait: +		break; + +	case XenbusStateConnected: +		xenbus_switch_state(dev, XenbusStateConnected); +		break; + +	case XenbusStateClosed: +		if (dev->state == XenbusStateClosed) +			break; +		/* Missed the backend's CLOSING state -- fallthrough */ +	case XenbusStateClosing: +		xenbus_frontend_closed(dev); +		break; +	} +} + +static const struct xenbus_device_id xencons_ids[] = { +	{ "console" }, +	{ "" } +}; + + +static DEFINE_XENBUS_DRIVER(xencons, "xenconsole", +	.probe = xencons_probe, +	.remove = xencons_remove, +	.resume = xencons_resume, +	.otherend_changed = xencons_backend_changed, +); +#endif /* CONFIG_HVC_XEN_FRONTEND */ + +static int __init xen_hvc_init(void) +{ +	int r; +	struct xencons_info *info; +	const struct hv_ops *ops; + +	if (!xen_domain()) +		return -ENODEV; + +	if (xen_initial_domain()) { +		ops = &dom0_hvc_ops; +		r = xen_initial_domain_console_init(); +		
if (r < 0) +			return r; +		info = vtermno_to_xencons(HVC_COOKIE); +	} else { +		ops = &domU_hvc_ops; +		if (xen_hvm_domain()) +			r = xen_hvm_console_init(); +		else +			r = xen_pv_console_init(); +		if (r < 0) +			return r; + +		info = vtermno_to_xencons(HVC_COOKIE); +		info->irq = bind_evtchn_to_irq(info->evtchn); +	} +	if (info->irq < 0) +		info->irq = 0; /* NO_IRQ */ +	else +		irq_set_noprobe(info->irq); + +	info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256); +	if (IS_ERR(info->hvc)) { +		r = PTR_ERR(info->hvc); +		spin_lock(&xencons_lock); +		list_del(&info->list); +		spin_unlock(&xencons_lock); +		if (info->irq) +			unbind_from_irqhandler(info->irq, NULL); +		kfree(info); +		return r; +	} + +	r = 0; +#ifdef CONFIG_HVC_XEN_FRONTEND +	r = xenbus_register_frontend(&xencons_driver); +#endif +	return r; +} +device_initcall(xen_hvc_init); + +static int xen_cons_init(void) +{ +	const struct hv_ops *ops; + +	if (!xen_domain()) +		return 0; + +	if (xen_initial_domain()) +		ops = &dom0_hvc_ops; +	else { +		int r; +		ops = &domU_hvc_ops; + +		if (xen_hvm_domain()) +			r = xen_hvm_console_init(); +		else +			r = xen_pv_console_init(); +		if (r < 0) +			return r; +	} + +	hvc_instantiate(HVC_COOKIE, 0, ops); +	return 0; +} +console_initcall(xen_cons_init); + +#ifdef CONFIG_EARLY_PRINTK +static void xenboot_write_console(struct console *console, const char *string, +				  unsigned len) +{ +	unsigned int linelen, off = 0; +	const char *pos; + +	if (!xen_pv_domain()) +		return; + +	dom0_write_console(0, string, len); + +	if (xen_initial_domain()) +		return; + +	domU_write_console(0, "(early) ", 8); +	while (off < len && NULL != (pos = strchr(string+off, '\n'))) { +		linelen = pos-string+off; +		if (off + linelen > len) +			break; +		domU_write_console(0, string+off, linelen); +		domU_write_console(0, "\r\n", 2); +		off += linelen + 1; +	} +	if (off < len) +		domU_write_console(0, string+off, len-off); +} + +struct console xenboot_console = { +	.name		= "xenboot", +	.write		= xenboot_write_console, +	.flags		= CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, +	.index		= -1, +}; +#endif	/* CONFIG_EARLY_PRINTK */ + +void xen_raw_console_write(const char *str) +{ +	ssize_t len = strlen(str); +	int rc = 0; + +	if (xen_domain()) { +		rc = dom0_write_console(0, str, len); +#ifdef CONFIG_X86 +		if (rc == -ENOSYS && xen_hvm_domain()) +			goto outb_print; + +	} else if (xen_cpuid_base()) { +		int i; +outb_print: +		for (i = 0; i < len; i++) +			outb(str[i], 0xe9); +#endif +	} +} + +void xen_raw_printk(const char *fmt, ...) +{ +	static char buf[512]; +	va_list ap; + +	va_start(ap, fmt); +	vsnprintf(buf, sizeof(buf), fmt, ap); +	va_end(ap); + +	xen_raw_console_write(buf); +} diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c new file mode 100644 index 00000000000..81e939e90c4 --- /dev/null +++ b/drivers/tty/hvc/hvcs.c @@ -0,0 +1,1616 @@ +/* + * IBM eServer Hypervisor Virtual Console Server Device Driver + * Copyright (C) 2003, 2004 IBM Corp. + *  Ryan S. Arnold (rsa@us.ibm.com) + * + *  This program is free software; you can redistribute it and/or modify + *  it under the terms of the GNU General Public License as published by + *  the Free Software Foundation; either version 2 of the License, or + *  (at your option) any later version. + * + *  This program is distributed in the hope that it will be useful, + *  but WITHOUT ANY WARRANTY; without even the implied warranty of + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + *  GNU General Public License for more details. 
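[Editor's note] xen_raw_printk() above is the usual minimal early-printk idiom: format into a fixed static buffer with vsnprintf() and hand the result to the raw write path, accepting truncation (and no reentrancy) rather than allocating memory that early. The same shape as a standalone userspace sketch; raw_write() here is only a stand-in for xen_raw_console_write().

#include <stdarg.h>
#include <stdio.h>

static void raw_write(const char *str)
{
	fputs(str, stderr);		/* stand-in for the hypercall write path */
}

static void raw_printk(const char *fmt, ...)
{
	static char buf[512];		/* fixed buffer: no allocation this early */
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);	/* long messages are truncated */
	va_end(ap);

	raw_write(buf);
}

int main(void)
{
	raw_printk("vcpu %d: %s\n", 0, "early console alive");
	return 0;
}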
+ * + *  You should have received a copy of the GNU General Public License + *  along with this program; if not, write to the Free Software + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + * + * Author(s) :  Ryan S. Arnold <rsa@us.ibm.com> + * + * This is the device driver for the IBM Hypervisor Virtual Console Server, + * "hvcs".  The IBM hvcs provides a tty driver interface to allow Linux + * user space applications access to the system consoles of logically + * partitioned operating systems, e.g. Linux, running on the same partitioned + * Power5 ppc64 system.  Physical hardware consoles per partition are not + * practical on this hardware so system consoles are accessed by this driver + * using inter-partition firmware interfaces to virtual terminal devices. + * + * A vty is known to the HMC as a "virtual serial server adapter".  It is a + * virtual terminal device that is created by firmware upon partition creation + * to act as a partitioned OS's console device. + * + * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64 + * Linux system upon their creation by the HMC or their exposure during boot. + * The non-user interactive backend of this driver is implemented as a vio + * device driver so that it can receive notification of vty-server lifetimes + * after it registers with the vio bus to handle vty-server probe and remove + * callbacks. + * + * Many vty-servers can be configured to connect to one vty, but a vty can + * only be actively connected to by a single vty-server, in any manner, at one + * time.  If the HMC is currently hosting the console for a target Linux + * partition; attempts to open the tty device to the partition's console using + * the hvcs on any partition will return -EBUSY with every open attempt until + * the HMC frees the connection between its vty-server and the desired + * partition's vty device.  Conversely, a vty-server may only be connected to + * a single vty at one time even though it may have several configured vty + * partner possibilities. + * + * Firmware does not provide notification of vty partner changes to this + * driver.  This means that an HMC Super Admin may add or remove partner vtys + * from a vty-server's partner list but the changes will not be signaled to + * the vty-server.  Firmware only notifies the driver when a vty-server is + * added or removed from the system.  To compensate for this deficiency, this + * driver implements a sysfs update attribute which provides a method for + * rescanning partner information upon a user's request. + * + * Each vty-server, prior to being exposed to this driver is reference counted + * using the 2.6 Linux kernel kref construct. + * + * For direction on installation and usage of this driver please reference + * Documentation/powerpc/hvcs.txt. + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/major.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/stat.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <asm/hvconsole.h> +#include <asm/hvcserver.h> +#include <asm/uaccess.h> +#include <asm/vio.h> + +/* + * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). + * Removed braces around single statements following conditionals.  
Removed '= + * 0' after static int declarations since these default to zero.  Removed + * list_for_each_safe() and replaced with list_for_each_entry() in + * hvcs_get_by_index().  The 'safe' version is un-needed now that the driver is + * using spinlocks.  Changed spin_lock_irqsave() to spin_lock() when locking + * hvcs_structs_lock and hvcs_pi_lock since these are not touched in an int + * handler.  Initialized hvcs_structs_lock and hvcs_pi_lock to + * SPIN_LOCK_UNLOCKED at declaration time rather than in hvcs_module_init(). + * Added spin_lock around list_del() in destroy_hvcs_struct() to protect the + * list traversals from a deletion.  Removed '= NULL' from pointer declaration + * statements since they are initialized NULL by default.  Removed wmb() + * instances from hvcs_try_write().  They probably aren't needed with locking in + * place.  Added check and cleanup for hvcs_pi_buff = kmalloc() in + * hvcs_module_init().  Exposed hvcs_struct.index via a sysfs attribute so that + * the coupling between /dev/hvcs* and a vty-server can be automatically + * determined.  Moved kobject_put() in hvcs_open outside of the + * spin_unlock_irqrestore(). + * + * 1.3.1 -> 1.3.2 Changed method for determining hvcs_struct->index and had it + * align with how the tty layer always assigns the lowest index available.  This + * change resulted in a list of ints that denotes which indexes are available. + * Device additions and removals use the new hvcs_get_index() and + * hvcs_return_index() helper functions.  The list is created with + * hvsc_alloc_index_list() and it is destroyed with hvcs_free_index_list(). + * Without these fixes hotplug vty-server adapter support goes crazy with this + * driver if the user removes a vty-server adapter.  Moved free_irq() outside of + * the hvcs_final_close() function in order to get it out of the spinlock. + * Rearranged hvcs_close().  Cleaned up some printks and did some housekeeping + * on the changelog.  Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from + * arch/powerepc/include/asm/hvcserver.h + * + * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to + * prevent possible lockup with realtime scheduling as similarly pointed out by + * akpm in hvc_console.  Changed resulted in the removal of hvcs_final_close() + * to reorder cleanup operations and prevent discarding of pending data during + * an hvcs_close().  Removed spinlock protection of hvcs_struct data members in + * hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed. + */ + +#define HVCS_DRIVER_VERSION "1.3.3" + +MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>"); +MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(HVCS_DRIVER_VERSION); + +/* + * Wait this long per iteration while trying to push buffered data to the + * hypervisor before allowing the tty to complete a close operation. + */ +#define HVCS_CLOSE_WAIT (HZ/100) /* 1/10 of a second */ + +/* + * Since the Linux TTY code does not currently (2-04-2004) support dynamic + * addition of tty derived devices and we shouldn't allocate thousands of + * tty_device pointers when the number of vty-server & vty partner connections + * will most often be much lower than this, we'll arbitrarily allocate + * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we + * register the tty_driver. This can be overridden using an insmod parameter. 
+ */ +#define HVCS_DEFAULT_SERVER_ADAPTERS	64 + +/* + * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device + * nodes as a sanity check.  Theoretically there can be over 1 Billion + * vty-server & vty partner connections. + */ +#define HVCS_MAX_SERVER_ADAPTERS	1024 + +/* + * We let Linux assign us a major number and we start the minors at zero.  There + * is no intuitive mapping between minor number and the target vty-server + * adapter except that each new vty-server adapter is always assigned to the + * smallest minor number available. + */ +#define HVCS_MINOR_START	0 + +/* + * The hcall interface involves putting 8 chars into each of two registers. + * We load up those 2 registers (in arch/powerpc/platforms/pseries/hvconsole.c) + * by casting char[16] to long[2].  It would work without __ALIGNED__, but a  + * little (tiny) bit slower because an unaligned load is slower than aligned  + * load. + */ +#define __ALIGNED__	__attribute__((__aligned__(8))) + +/* + * How much data can firmware send with each hvc_put_chars()?  Maybe this + * should be moved into an architecture specific area. + */ +#define HVCS_BUFF_LEN	16 + +/* + * This is the maximum amount of data we'll let the user send us (hvcs_write) at + * once in a chunk as a sanity check. + */ +#define HVCS_MAX_FROM_USER	4096 + +/* + * Be careful when adding flags to this line discipline.  Don't add anything + * that will cause echoing or we'll go into recursive loop echoing chars back + * and forth with the console drivers. + */ +static struct ktermios hvcs_tty_termios = { +	.c_iflag = IGNBRK | IGNPAR, +	.c_oflag = OPOST, +	.c_cflag = B38400 | CS8 | CREAD | HUPCL, +	.c_cc = INIT_C_CC, +	.c_ispeed = 38400, +	.c_ospeed = 38400 +}; + +/* + * This value is used to take the place of a command line parameter when the + * module is inserted.  It starts as -1 and stays as such if the user doesn't + * specify a module insmod parameter.  If they DO specify one then it is set to + * the value of the integer passed in. + */ +static int hvcs_parm_num_devs = -1; +module_param(hvcs_parm_num_devs, int, 0); + +static const char hvcs_driver_name[] = "hvcs"; +static const char hvcs_device_node[] = "hvcs"; +static const char hvcs_driver_string[] +	= "IBM hvcs (Hypervisor Virtual Console Server) Driver"; + +/* Status of partner info rescan triggered via sysfs. */ +static int hvcs_rescan_status; + +static struct tty_driver *hvcs_tty_driver; + +/* + * In order to be somewhat sane this driver always associates the hvcs_struct + * index element with the numerically equal tty->index.  This means that a + * hotplugged vty-server adapter will always map to the lowest index valued + * device node.  If vty-servers were hotplug removed from the system and then + * new ones added the new vty-server may have the largest slot number of all + * the vty-server adapters in the partition but it may have the lowest dev node + * index of all the adapters due to the hole left by the hotplug removed + * adapter.  There are a set of functions provided to get the lowest index for + * a new device as well as return the index to the list.  This list is allocated + * with a number of elements equal to the number of device nodes requested when + * the module was inserted. + */ +static int *hvcs_index_list; + +/* + * How large is the list?  This is kept for traversal since the list is + * dynamically created. 
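[Editor's note] The __ALIGNED__ comment above relies on a 16-byte character buffer being viewable as two 64-bit register-sized words for the hcall. A quick userspace illustration of that packing: the buffer is 8-byte aligned so both loads are aligned; memcpy is used here instead of the kernel's direct char[16]-to-long[2] cast purely to keep the sketch free of aliasing concerns.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define __ALIGNED__ __attribute__((__aligned__(8)))

int main(void)
{
	char buf[16] __ALIGNED__ = "0123456789abcdef";
	uint64_t reg[2];

	/* same 16 bytes seen as the two "registers" the hcall interface wants */
	memcpy(reg, buf, sizeof(reg));
	printf("reg0=%016" PRIx64 " reg1=%016" PRIx64 "\n", reg[0], reg[1]);
	return 0;
}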
+ */ +static int hvcs_index_count; + +/* + * Used by the khvcsd to pick up I/O operations when the kernel_thread is + * already awake but potentially shifted to TASK_INTERRUPTIBLE state. + */ +static int hvcs_kicked; + +/* + * Use by the kthread construct for task operations like waking the sleeping + * thread and stopping the kthread. + */ +static struct task_struct *hvcs_task; + +/* + * We allocate this for the use of all of the hvcs_structs when they fetch + * partner info. + */ +static unsigned long *hvcs_pi_buff; + +/* Only allow one hvcs_struct to use the hvcs_pi_buff at a time. */ +static DEFINE_SPINLOCK(hvcs_pi_lock); + +/* One vty-server per hvcs_struct */ +struct hvcs_struct { +	struct tty_port port; +	spinlock_t lock; + +	/* +	 * This index identifies this hvcs device as the complement to a +	 * specific tty index. +	 */ +	unsigned int index; + +	/* +	 * Used to tell the driver kernel_thread what operations need to take +	 * place upon this hvcs_struct instance. +	 */ +	int todo_mask; + +	/* +	 * This buffer is required so that when hvcs_write_room() reports that +	 * it can send HVCS_BUFF_LEN characters that it will buffer the full +	 * HVCS_BUFF_LEN characters if need be.  This is essential for opost +	 * writes since they do not do high level buffering and expect to be +	 * able to send what the driver commits to sending buffering +	 * [e.g. tab to space conversions in n_tty.c opost()]. +	 */ +	char buffer[HVCS_BUFF_LEN]; +	int chars_in_buffer; + +	/* +	 * Any variable below is valid before a tty is connected and +	 * stays valid after the tty is disconnected.  These shouldn't be +	 * whacked until the kobject refcount reaches zero though some entries +	 * may be changed via sysfs initiatives. +	 */ +	int connected; /* is the vty-server currently connected to a vty? 
*/ +	uint32_t p_unit_address; /* partner unit address */ +	uint32_t p_partition_ID; /* partner partition ID */ +	char p_location_code[HVCS_CLC_LENGTH + 1]; /* CLC + Null Term */ +	struct list_head next; /* list management */ +	struct vio_dev *vdev; +}; + +static LIST_HEAD(hvcs_structs); +static DEFINE_SPINLOCK(hvcs_structs_lock); +static DEFINE_MUTEX(hvcs_init_mutex); + +static void hvcs_unthrottle(struct tty_struct *tty); +static void hvcs_throttle(struct tty_struct *tty); +static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance); + +static int hvcs_write(struct tty_struct *tty, +		const unsigned char *buf, int count); +static int hvcs_write_room(struct tty_struct *tty); +static int hvcs_chars_in_buffer(struct tty_struct *tty); + +static int hvcs_has_pi(struct hvcs_struct *hvcsd); +static void hvcs_set_pi(struct hvcs_partner_info *pi, +		struct hvcs_struct *hvcsd); +static int hvcs_get_pi(struct hvcs_struct *hvcsd); +static int hvcs_rescan_devices_list(void); + +static int hvcs_partner_connect(struct hvcs_struct *hvcsd); +static void hvcs_partner_free(struct hvcs_struct *hvcsd); + +static int hvcs_enable_device(struct hvcs_struct *hvcsd, +		uint32_t unit_address, unsigned int irq, struct vio_dev *dev); + +static int hvcs_open(struct tty_struct *tty, struct file *filp); +static void hvcs_close(struct tty_struct *tty, struct file *filp); +static void hvcs_hangup(struct tty_struct * tty); + +static int hvcs_probe(struct vio_dev *dev, +		const struct vio_device_id *id); +static int hvcs_remove(struct vio_dev *dev); +static int __init hvcs_module_init(void); +static void __exit hvcs_module_exit(void); +static int hvcs_initialize(void); + +#define HVCS_SCHED_READ	0x00000001 +#define HVCS_QUICK_READ	0x00000002 +#define HVCS_TRY_WRITE	0x00000004 +#define HVCS_READ_MASK	(HVCS_SCHED_READ | HVCS_QUICK_READ) + +static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) +{ +	return dev_get_drvdata(&viod->dev); +} +/* The sysfs interface for the driver and devices */ + +static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; +	int retval; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return retval; +} +static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL); + +static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; +	int retval; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return retval; +} +static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL); + +static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf, +		size_t count) +{ +	/* +	 * Don't need this feature at the present time because firmware doesn't +	 * yet support multiple partners. 
+	 */ +	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); +	return -EPERM; +} + +static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; +	int retval; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return retval; +} + +static DEVICE_ATTR(current_vty, +	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); + +static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf, +		size_t count) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; + +	/* writing a '0' to this sysfs entry will result in the disconnect. */ +	if (simple_strtol(buf, NULL, 0) != 0) +		return -EINVAL; + +	spin_lock_irqsave(&hvcsd->lock, flags); + +	if (hvcsd->port.count > 0) { +		spin_unlock_irqrestore(&hvcsd->lock, flags); +		printk(KERN_INFO "HVCS: vterm state unchanged.  " +				"The hvcs device node is still in use.\n"); +		return -EPERM; +	} + +	if (hvcsd->connected == 0) { +		spin_unlock_irqrestore(&hvcsd->lock, flags); +		printk(KERN_INFO "HVCS: vterm state unchanged. The" +				" vty-server is not connected to a vty.\n"); +		return -EPERM; +	} + +	hvcs_partner_free(hvcsd); +	printk(KERN_INFO "HVCS: Closed vty-server@%X and" +			" partner vty@%X:%d connection.\n", +			hvcsd->vdev->unit_address, +			hvcsd->p_unit_address, +			(uint32_t)hvcsd->p_partition_ID); + +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return count; +} + +static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; +	int retval; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	retval = sprintf(buf, "%d\n", hvcsd->connected); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return retval; +} +static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, +		hvcs_vterm_state_show, hvcs_vterm_state_store); + +static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf) +{ +	struct vio_dev *viod = to_vio_dev(dev); +	struct hvcs_struct *hvcsd = from_vio_dev(viod); +	unsigned long flags; +	int retval; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	retval = sprintf(buf, "%d\n", hvcsd->index); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return retval; +} + +static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL); + +static struct attribute *hvcs_attrs[] = { +	&dev_attr_partner_vtys.attr, +	&dev_attr_partner_clcs.attr, +	&dev_attr_current_vty.attr, +	&dev_attr_vterm_state.attr, +	&dev_attr_index.attr, +	NULL, +}; + +static struct attribute_group hvcs_attr_group = { +	.attrs = hvcs_attrs, +}; + +static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) +{ +	/* A 1 means it is updating, a 0 means it is done updating */ +	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); +} + +static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, +		size_t count) +{ +	if ((simple_strtol(buf, NULL, 0) != 1) +		&& (hvcs_rescan_status != 0)) +		return -EINVAL; + +	hvcs_rescan_status = 1; +	printk(KERN_INFO "HVCS: rescanning partner info for all" +		" vty-servers.\n"); +	hvcs_rescan_devices_list(); +	hvcs_rescan_status = 0; +	return count; +} + +static 
DRIVER_ATTR(rescan, +	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); + +static void hvcs_kick(void) +{ +	hvcs_kicked = 1; +	wmb(); +	wake_up_process(hvcs_task); +} + +static void hvcs_unthrottle(struct tty_struct *tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; +	unsigned long flags; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	hvcsd->todo_mask |= HVCS_SCHED_READ; +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	hvcs_kick(); +} + +static void hvcs_throttle(struct tty_struct *tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; +	unsigned long flags; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	vio_disable_interrupts(hvcsd->vdev); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +} + +/* + * If the device is being removed we don't have to worry about this interrupt + * handler taking any further interrupts because they are disabled which means + * the hvcs_struct will always be valid in this handler. + */ +static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance) +{ +	struct hvcs_struct *hvcsd = dev_instance; + +	spin_lock(&hvcsd->lock); +	vio_disable_interrupts(hvcsd->vdev); +	hvcsd->todo_mask |= HVCS_SCHED_READ; +	spin_unlock(&hvcsd->lock); +	hvcs_kick(); + +	return IRQ_HANDLED; +} + +/* This function must be called with the hvcsd->lock held */ +static void hvcs_try_write(struct hvcs_struct *hvcsd) +{ +	uint32_t unit_address = hvcsd->vdev->unit_address; +	struct tty_struct *tty = hvcsd->port.tty; +	int sent; + +	if (hvcsd->todo_mask & HVCS_TRY_WRITE) { +		/* won't send partial writes */ +		sent = hvc_put_chars(unit_address, +				&hvcsd->buffer[0], +				hvcsd->chars_in_buffer ); +		if (sent > 0) { +			hvcsd->chars_in_buffer = 0; +			/* wmb(); */ +			hvcsd->todo_mask &= ~(HVCS_TRY_WRITE); +			/* wmb(); */ + +			/* +			 * We are still obligated to deliver the data to the +			 * hypervisor even if the tty has been closed because +			 * we committed to delivering it.  But don't try to wake +			 * a non-existent tty. +			 */ +			if (tty) { +				tty_wakeup(tty); +			} +		} +	} +} + +static int hvcs_io(struct hvcs_struct *hvcsd) +{ +	uint32_t unit_address; +	struct tty_struct *tty; +	char buf[HVCS_BUFF_LEN] __ALIGNED__; +	unsigned long flags; +	int got = 0; + +	spin_lock_irqsave(&hvcsd->lock, flags); + +	unit_address = hvcsd->vdev->unit_address; +	tty = hvcsd->port.tty; + +	hvcs_try_write(hvcsd); + +	if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) { +		hvcsd->todo_mask &= ~(HVCS_READ_MASK); +		goto bail; +	} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK))) +		goto bail; + +	/* remove the read masks */ +	hvcsd->todo_mask &= ~(HVCS_READ_MASK); + +	if (tty_buffer_request_room(&hvcsd->port, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) { +		got = hvc_get_chars(unit_address, +				&buf[0], +				HVCS_BUFF_LEN); +		tty_insert_flip_string(&hvcsd->port, buf, got); +	} + +	/* Give the TTY time to process the data we just sent. 
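[Editor's note] hvcs_kick() above, together with the interrupt handler, is the usual "set a flag, wake the worker" hand-off: the hard interrupt only masks further vio interrupts, marks the adapter with HVCS_SCHED_READ and wakes khvcsd (shown a little further below), which then does the hvc_get_chars/hvc_put_chars work in process context. A small pthread sketch of that hand-off, with the flag under a mutex and the worker parked on a condition variable instead of schedule(); names are illustrative, not from the driver.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int kicked;
static int done;

static void kick(void)			/* what an interrupt handler would call */
{
	pthread_mutex_lock(&lock);
	kicked = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *unused)	/* stands in for khvcsd() */
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!done) {
		while (!kicked && !done)
			pthread_cond_wait(&cond, &lock);	/* "schedule()" */
		if (kicked) {
			kicked = 0;
			printf("worker: servicing deferred console I/O\n");
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	kick();				/* pretend an interrupt fired */
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}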
*/ +	if (got) +		hvcsd->todo_mask |= HVCS_QUICK_READ; + +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	/* This is synch because tty->low_latency == 1 */ +	if(got) +		tty_flip_buffer_push(&hvcsd->port); + +	if (!got) { +		/* Do this _after_ the flip_buffer_push */ +		spin_lock_irqsave(&hvcsd->lock, flags); +		vio_enable_interrupts(hvcsd->vdev); +		spin_unlock_irqrestore(&hvcsd->lock, flags); +	} + +	return hvcsd->todo_mask; + + bail: +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	return hvcsd->todo_mask; +} + +static int khvcsd(void *unused) +{ +	struct hvcs_struct *hvcsd; +	int hvcs_todo_mask; + +	__set_current_state(TASK_RUNNING); + +	do { +		hvcs_todo_mask = 0; +		hvcs_kicked = 0; +		wmb(); + +		spin_lock(&hvcs_structs_lock); +		list_for_each_entry(hvcsd, &hvcs_structs, next) { +			hvcs_todo_mask |= hvcs_io(hvcsd); +		} +		spin_unlock(&hvcs_structs_lock); + +		/* +		 * If any of the hvcs adapters want to try a write or quick read +		 * don't schedule(), yield a smidgen then execute the hvcs_io +		 * thread again for those that want the write. +		 */ +		 if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) { +			yield(); +			continue; +		} + +		set_current_state(TASK_INTERRUPTIBLE); +		if (!hvcs_kicked) +			schedule(); +		__set_current_state(TASK_RUNNING); +	} while (!kthread_should_stop()); + +	return 0; +} + +static struct vio_device_id hvcs_driver_table[] = { +	{"serial-server", "hvterm2"}, +	{ "", "" } +}; +MODULE_DEVICE_TABLE(vio, hvcs_driver_table); + +static void hvcs_return_index(int index) +{ +	/* Paranoia check */ +	if (!hvcs_index_list) +		return; +	if (index < 0 || index >= hvcs_index_count) +		return; +	if (hvcs_index_list[index] == -1) +		return; +	else +		hvcs_index_list[index] = -1; +} + +static void hvcs_destruct_port(struct tty_port *p) +{ +	struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port); +	struct vio_dev *vdev; +	unsigned long flags; + +	spin_lock(&hvcs_structs_lock); +	spin_lock_irqsave(&hvcsd->lock, flags); + +	/* the list_del poisons the pointers */ +	list_del(&(hvcsd->next)); + +	if (hvcsd->connected == 1) { +		hvcs_partner_free(hvcsd); +		printk(KERN_INFO "HVCS: Closed vty-server@%X and" +				" partner vty@%X:%d connection.\n", +				hvcsd->vdev->unit_address, +				hvcsd->p_unit_address, +				(uint32_t)hvcsd->p_partition_ID); +	} +	printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n", +			hvcsd->vdev->unit_address); + +	vdev = hvcsd->vdev; +	hvcsd->vdev = NULL; + +	hvcsd->p_unit_address = 0; +	hvcsd->p_partition_ID = 0; +	hvcs_return_index(hvcsd->index); +	memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1); + +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	spin_unlock(&hvcs_structs_lock); + +	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); + +	kfree(hvcsd); +} + +static const struct tty_port_operations hvcs_port_ops = { +	.destruct = hvcs_destruct_port, +}; + +static int hvcs_get_index(void) +{ +	int i; +	/* Paranoia check */ +	if (!hvcs_index_list) { +		printk(KERN_ERR "HVCS: hvcs_index_list NOT valid!.\n"); +		return -EFAULT; +	} +	/* Find the numerically lowest first free index. 
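[Editor's note] hvcs_return_index() above and the hvcs_get_index() scan that continues just below manage the tty index namespace as a flat int array: -1 marks a free slot, anything else marks it in use, and allocation always hands out the numerically lowest free slot so a replacement adapter reuses the hole left by a hot-removed one. The same scheme in a few lines of standalone C (array size and names are illustrative only).

#include <stdio.h>

#define NR_SLOTS 4
static int slots[NR_SLOTS] = { -1, -1, -1, -1 };	/* -1 == free */

static int get_index(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++)
		if (slots[i] == -1) {
			slots[i] = 0;			/* claim the lowest free slot */
			return i;
		}
	return -1;					/* all in use */
}

static void return_index(int i)
{
	if (i >= 0 && i < NR_SLOTS)
		slots[i] = -1;
}

int main(void)
{
	int a = get_index(), b = get_index();

	return_index(a);				/* slot 0 becomes free again */
	printf("a=%d b=%d next=%d\n", a, b, get_index());	/* a=0 b=1 next=0 */
	return 0;
}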
*/ +	for(i = 0; i < hvcs_index_count; i++) { +		if (hvcs_index_list[i] == -1) { +			hvcs_index_list[i] = 0; +			return i; +		} +	} +	return -1; +} + +static int hvcs_probe( +	struct vio_dev *dev, +	const struct vio_device_id *id) +{ +	struct hvcs_struct *hvcsd; +	int index, rc; +	int retval; + +	if (!dev || !id) { +		printk(KERN_ERR "HVCS: probed with invalid parameter.\n"); +		return -EPERM; +	} + +	/* Make sure we are properly initialized */ +	rc = hvcs_initialize(); +	if (rc) { +		pr_err("HVCS: Failed to initialize core driver.\n"); +		return rc; +	} + +	/* early to avoid cleanup on failure */ +	index = hvcs_get_index(); +	if (index < 0) { +		return -EFAULT; +	} + +	hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL); +	if (!hvcsd) +		return -ENODEV; + +	tty_port_init(&hvcsd->port); +	hvcsd->port.ops = &hvcs_port_ops; +	spin_lock_init(&hvcsd->lock); + +	hvcsd->vdev = dev; +	dev_set_drvdata(&dev->dev, hvcsd); + +	hvcsd->index = index; + +	/* hvcsd->index = ++hvcs_struct_count; */ +	hvcsd->chars_in_buffer = 0; +	hvcsd->todo_mask = 0; +	hvcsd->connected = 0; + +	/* +	 * This will populate the hvcs_struct's partner info fields for the +	 * first time. +	 */ +	if (hvcs_get_pi(hvcsd)) { +		printk(KERN_ERR "HVCS: Failed to fetch partner" +			" info for vty-server@%X on device probe.\n", +			hvcsd->vdev->unit_address); +	} + +	/* +	 * If a user app opens a tty that corresponds to this vty-server before +	 * the hvcs_struct has been added to the devices list then the user app +	 * will get -ENODEV. +	 */ +	spin_lock(&hvcs_structs_lock); +	list_add_tail(&(hvcsd->next), &hvcs_structs); +	spin_unlock(&hvcs_structs_lock); + +	retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group); +	if (retval) { +		printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n", +		       hvcsd->vdev->unit_address); +		return retval; +	} + +	printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address); + +	/* +	 * DON'T enable interrupts here because there is no user to receive the +	 * data. +	 */ +	return 0; +} + +static int hvcs_remove(struct vio_dev *dev) +{ +	struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev); +	unsigned long flags; +	struct tty_struct *tty; + +	if (!hvcsd) +		return -ENODEV; + +	/* By this time the vty-server won't be getting any more interrupts */ + +	spin_lock_irqsave(&hvcsd->lock, flags); + +	tty = hvcsd->port.tty; + +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	/* +	 * Let the last holder of this object cause it to be removed, which +	 * would probably be tty_hangup below. +	 */ +	tty_port_put(&hvcsd->port); + +	/* +	 * The hangup is a scheduled function which will auto chain call +	 * hvcs_hangup.  The tty should always be valid at this time unless a +	 * simultaneous tty close already cleaned up the hvcs_struct. 
+	 */ +	if (tty) +		tty_hangup(tty); + +	printk(KERN_INFO "HVCS: vty-server@%X removed from the" +			" vio bus.\n", dev->unit_address); +	return 0; +}; + +static struct vio_driver hvcs_vio_driver = { +	.id_table	= hvcs_driver_table, +	.probe		= hvcs_probe, +	.remove		= hvcs_remove, +	.name		= hvcs_driver_name, +}; + +/* Only called from hvcs_get_pi please */ +static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd) +{ +	hvcsd->p_unit_address = pi->unit_address; +	hvcsd->p_partition_ID  = pi->partition_ID; + +	/* copy the null-term char too */ +	strlcpy(&hvcsd->p_location_code[0], +			&pi->location_code[0], sizeof(hvcsd->p_location_code)); +} + +/* + * Traverse the list and add the partner info that is found to the hvcs_struct + * struct entry. NOTE: At this time I know that partner info will return a + * single entry but in the future there may be multiple partner info entries per + * vty-server and you'll want to zero out that list and reset it.  If for some + * reason you have an old version of this driver but there IS more than one + * partner info then hvcsd->p_* will hold the last partner info data from the + * firmware query.  A good way to update this code would be to replace the three + * partner info fields in hvcs_struct with a list of hvcs_partner_info + * instances. + * + * This function must be called with the hvcsd->lock held. + */ +static int hvcs_get_pi(struct hvcs_struct *hvcsd) +{ +	struct hvcs_partner_info *pi; +	uint32_t unit_address = hvcsd->vdev->unit_address; +	struct list_head head; +	int retval; + +	spin_lock(&hvcs_pi_lock); +	if (!hvcs_pi_buff) { +		spin_unlock(&hvcs_pi_lock); +		return -EFAULT; +	} +	retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff); +	spin_unlock(&hvcs_pi_lock); +	if (retval) { +		printk(KERN_ERR "HVCS: Failed to fetch partner" +			" info for vty-server@%x.\n", unit_address); +		return retval; +	} + +	/* nixes the values if the partner vty went away */ +	hvcsd->p_unit_address = 0; +	hvcsd->p_partition_ID = 0; + +	list_for_each_entry(pi, &head, node) +		hvcs_set_pi(pi, hvcsd); + +	hvcs_free_partner_info(&head); +	return 0; +} + +/* + * This function is executed by the driver "rescan" sysfs entry.  It shouldn't + * be executed elsewhere, in order to prevent deadlock issues. + */ +static int hvcs_rescan_devices_list(void) +{ +	struct hvcs_struct *hvcsd; +	unsigned long flags; + +	spin_lock(&hvcs_structs_lock); + +	list_for_each_entry(hvcsd, &hvcs_structs, next) { +		spin_lock_irqsave(&hvcsd->lock, flags); +		hvcs_get_pi(hvcsd); +		spin_unlock_irqrestore(&hvcsd->lock, flags); +	} + +	spin_unlock(&hvcs_structs_lock); + +	return 0; +} + +/* + * Farm this off into its own function because it could be more complex once + * multiple partners support is added. This function should be called with + * the hvcsd->lock held. + */ +static int hvcs_has_pi(struct hvcs_struct *hvcsd) +{ +	if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID)) +		return 0; +	return 1; +} + +/* + * NOTE: It is possible that the super admin removed a partner vty and then + * added a different vty as the new partner. + * + * This function must be called with the hvcsd->lock held. + */ +static int hvcs_partner_connect(struct hvcs_struct *hvcsd) +{ +	int retval; +	unsigned int unit_address = hvcsd->vdev->unit_address; + +	/* +	 * If there wasn't any pi when the device was added it doesn't meant +	 * there isn't any now.  This driver isn't notified when a new partner +	 * vty is added to a vty-server so we discover changes on our own. 
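[Editor's note] hvcs_get_pi() above funnels every adapter's partner-info query through one shared buffer, hvcs_pi_buff, guarded by hvcs_pi_lock; the changelog earlier notes the lock can be a plain spin_lock() because the buffer is never touched from interrupt context. A userspace sketch of that "one scratch buffer, many devices" pattern; the firmware call is faked and every name below is made up for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static char pi_buff[4096];		/* one scratch buffer shared by all devices */

/* stand-in for the firmware partner-info query */
static int fake_query(unsigned int unit, char *buf, size_t len)
{
	snprintf(buf, len, "partner of vty-server@%X", unit);
	return 0;
}

static int get_pi(unsigned int unit, char *out, size_t outlen)
{
	int rc;

	pthread_mutex_lock(&pi_lock);	/* serialize use of the scratch buffer */
	rc = fake_query(unit, pi_buff, sizeof(pi_buff));
	if (rc == 0)
		snprintf(out, outlen, "%s", pi_buff);
	pthread_mutex_unlock(&pi_lock);
	return rc;
}

int main(void)
{
	char desc[64];

	if (get_pi(0x1234, desc, sizeof(desc)) == 0)
		printf("%s\n", desc);
	return 0;
}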
+	 * Please see comments in hvcs_register_connection() for justification +	 * of this bizarre code. +	 */ +	retval = hvcs_register_connection(unit_address, +			hvcsd->p_partition_ID, +			hvcsd->p_unit_address); +	if (!retval) { +		hvcsd->connected = 1; +		return 0; +	} else if (retval != -EINVAL) +		return retval; + +	/* +	 * As per the spec re-get the pi and try again if -EINVAL after the +	 * first connection attempt. +	 */ +	if (hvcs_get_pi(hvcsd)) +		return -ENOMEM; + +	if (!hvcs_has_pi(hvcsd)) +		return -ENODEV; + +	retval = hvcs_register_connection(unit_address, +			hvcsd->p_partition_ID, +			hvcsd->p_unit_address); +	if (retval != -EINVAL) { +		hvcsd->connected = 1; +		return retval; +	} + +	/* +	 * EBUSY is the most likely scenario though the vty could have been +	 * removed or there really could be an hcall error due to the parameter +	 * data but thanks to ambiguous firmware return codes we can't really +	 * tell. +	 */ +	printk(KERN_INFO "HVCS: vty-server or partner" +			" vty is busy.  Try again later.\n"); +	return -EBUSY; +} + +/* This function must be called with the hvcsd->lock held */ +static void hvcs_partner_free(struct hvcs_struct *hvcsd) +{ +	int retval; +	do { +		retval = hvcs_free_connection(hvcsd->vdev->unit_address); +	} while (retval == -EBUSY); +	hvcsd->connected = 0; +} + +/* This helper function must be called WITHOUT the hvcsd->lock held */ +static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address, +		unsigned int irq, struct vio_dev *vdev) +{ +	unsigned long flags; +	int rc; + +	/* +	 * It is possible that the vty-server was removed between the time that +	 * the conn was registered and now. +	 */ +	if (!(rc = request_irq(irq, &hvcs_handle_interrupt, +				0, "ibmhvcs", hvcsd))) { +		/* +		 * It is possible the vty-server was removed after the irq was +		 * requested but before we have time to enable interrupts. +		 */ +		if (vio_enable_interrupts(vdev) == H_SUCCESS) +			return 0; +		else { +			printk(KERN_ERR "HVCS: int enable failed for" +					" vty-server@%X.\n", unit_address); +			free_irq(irq, hvcsd); +		} +	} else +		printk(KERN_ERR "HVCS: irq req failed for" +				" vty-server@%X.\n", unit_address); + +	spin_lock_irqsave(&hvcsd->lock, flags); +	hvcs_partner_free(hvcsd); +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	return rc; + +} + +/* + * This always increments the kref ref count if the call is successful. + * Please remember to dec when you are done with the instance. + * + * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when + * calling this function or you will get deadlock. + */ +static struct hvcs_struct *hvcs_get_by_index(int index) +{ +	struct hvcs_struct *hvcsd; +	unsigned long flags; + +	spin_lock(&hvcs_structs_lock); +	list_for_each_entry(hvcsd, &hvcs_structs, next) { +		spin_lock_irqsave(&hvcsd->lock, flags); +		if (hvcsd->index == index) { +			tty_port_get(&hvcsd->port); +			spin_unlock_irqrestore(&hvcsd->lock, flags); +			spin_unlock(&hvcs_structs_lock); +			return hvcsd; +		} +		spin_unlock_irqrestore(&hvcsd->lock, flags); +	} +	spin_unlock(&hvcs_structs_lock); + +	return NULL; +} + +static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty) +{ +	struct hvcs_struct *hvcsd; +	struct vio_dev *vdev; +	unsigned long unit_address, flags; +	unsigned int irq; +	int retval; + +	/* +	 * Is there a vty-server that shares the same index? +	 * This function increments the kref index. 
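[Editor's note] hvcs_partner_connect() above encodes the firmware-mandated retry: try the connection with whatever partner info is cached, and only if that fails with -EINVAL refresh the partner info and try exactly once more, treating a second -EINVAL as "busy". The decision flow is summarized below with the hcalls replaced by hypothetical stubs so the sketch stands alone; the stubs and their return values are assumptions for illustration only.

#include <errno.h>
#include <stdio.h>

/* hypothetical stubs for the firmware calls used by the real driver */
static int register_connection(unsigned int unit) { return -EINVAL; }
static int refresh_partner_info(unsigned int unit) { return 0; }

static int partner_connect(unsigned int unit)
{
	int rc = register_connection(unit);

	if (rc == 0)
		return 0;		/* connected on the first try */
	if (rc != -EINVAL)
		return rc;		/* hard error, give up */

	/* stale partner info: refresh it and retry exactly once */
	if (refresh_partner_info(unit))
		return -ENOMEM;

	rc = register_connection(unit);
	if (rc != -EINVAL)
		return rc;		/* 0 on success, or the real error */

	return -EBUSY;			/* still -EINVAL: report busy, try later */
}

int main(void)
{
	printf("connect: %d\n", partner_connect(1));	/* -EBUSY with these stubs */
	return 0;
}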
+	 */ +	hvcsd = hvcs_get_by_index(tty->index); +	if (!hvcsd) { +		printk(KERN_WARNING "HVCS: open failed, no device associated" +				" with tty->index %d.\n", tty->index); +		return -ENODEV; +	} + +	spin_lock_irqsave(&hvcsd->lock, flags); + +	if (hvcsd->connected == 0) { +		retval = hvcs_partner_connect(hvcsd); +		if (retval) { +			spin_unlock_irqrestore(&hvcsd->lock, flags); +			printk(KERN_WARNING "HVCS: partner connect failed.\n"); +			goto err_put; +		} +	} + +	hvcsd->port.count = 0; +	hvcsd->port.tty = tty; +	tty->driver_data = hvcsd; + +	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN); + +	/* +	 * Save these in the spinlock for the enable operations that need them +	 * outside of the spinlock. +	 */ +	irq = hvcsd->vdev->irq; +	vdev = hvcsd->vdev; +	unit_address = hvcsd->vdev->unit_address; + +	hvcsd->todo_mask |= HVCS_SCHED_READ; +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	/* +	 * This must be done outside of the spinlock because it requests irqs +	 * and will grab the spinlock and free the connection if it fails. +	 */ +	retval = hvcs_enable_device(hvcsd, unit_address, irq, vdev); +	if (retval) { +		printk(KERN_WARNING "HVCS: enable device failed.\n"); +		goto err_put; +	} + +	retval = tty_port_install(&hvcsd->port, driver, tty); +	if (retval) +		goto err_irq; + +	return 0; +err_irq: +	spin_lock_irqsave(&hvcsd->lock, flags); +	vio_disable_interrupts(hvcsd->vdev); +	spin_unlock_irqrestore(&hvcsd->lock, flags); +	free_irq(irq, hvcsd); +err_put: +	tty_port_put(&hvcsd->port); + +	return retval; +} + +/* + * This is invoked via the tty_open interface when a user app connects to the + * /dev node. + */ +static int hvcs_open(struct tty_struct *tty, struct file *filp) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; +	unsigned long flags; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	hvcsd->port.count++; +	hvcsd->todo_mask |= HVCS_SCHED_READ; +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	hvcs_kick(); + +	printk(KERN_INFO "HVCS: vty-server@%X connection opened.\n", +		hvcsd->vdev->unit_address ); + +	return 0; +} + +static void hvcs_close(struct tty_struct *tty, struct file *filp) +{ +	struct hvcs_struct *hvcsd; +	unsigned long flags; +	int irq; + +	/* +	 * Is someone trying to close the file associated with this device after +	 * we have hung up?  If so tty->driver_data wouldn't be valid. +	 */ +	if (tty_hung_up_p(filp)) +		return; + +	/* +	 * No driver_data means that this close was probably issued after a +	 * failed hvcs_open by the tty layer's release_dev() api and we can just +	 * exit cleanly. +	 */ +	if (!tty->driver_data) +		return; + +	hvcsd = tty->driver_data; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	if (--hvcsd->port.count == 0) { + +		vio_disable_interrupts(hvcsd->vdev); + +		/* +		 * NULL this early so that the kernel_thread doesn't try to +		 * execute any operations on the TTY even though it is obligated +		 * to deliver any pending I/O to the hypervisor. +		 */ +		hvcsd->port.tty = NULL; + +		irq = hvcsd->vdev->irq; +		spin_unlock_irqrestore(&hvcsd->lock, flags); + +		tty_wait_until_sent_from_close(tty, HVCS_CLOSE_WAIT); + +		/* +		 * This line is important because it tells hvcs_open that this +		 * device needs to be re-configured the next time hvcs_open is +		 * called. 
+		 */ +		tty->driver_data = NULL; + +		free_irq(irq, hvcsd); +		return; +	} else if (hvcsd->port.count < 0) { +		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" +				" is missmanaged.\n", +		hvcsd->vdev->unit_address, hvcsd->port.count); +	} + +	spin_unlock_irqrestore(&hvcsd->lock, flags); +} + +static void hvcs_cleanup(struct tty_struct * tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; + +	tty_port_put(&hvcsd->port); +} + +static void hvcs_hangup(struct tty_struct * tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; +	unsigned long flags; +	int temp_open_count; +	int irq; + +	spin_lock_irqsave(&hvcsd->lock, flags); +	/* Preserve this so that we know how many kref refs to put */ +	temp_open_count = hvcsd->port.count; + +	/* +	 * Don't kref put inside the spinlock because the destruction +	 * callback may use the spinlock and it may get called before the +	 * spinlock has been released. +	 */ +	vio_disable_interrupts(hvcsd->vdev); + +	hvcsd->todo_mask = 0; + +	/* I don't think the tty needs the hvcs_struct pointer after a hangup */ +	tty->driver_data = NULL; +	hvcsd->port.tty = NULL; + +	hvcsd->port.count = 0; + +	/* This will drop any buffered data on the floor which is OK in a hangup +	 * scenario. */ +	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN); +	hvcsd->chars_in_buffer = 0; + +	irq = hvcsd->vdev->irq; + +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	free_irq(irq, hvcsd); + +	/* +	 * We need to kref_put() for every open_count we have since the +	 * tty_hangup() function doesn't invoke a close per open connection on a +	 * non-console device. +	 */ +	while(temp_open_count) { +		--temp_open_count; +		/* +		 * The final put will trigger destruction of the hvcs_struct. +		 * NOTE:  If this hangup was signaled from user space then the +		 * final put will never happen. +		 */ +		tty_port_put(&hvcsd->port); +	} +} + +/* + * NOTE: This is almost always from_user since user level apps interact with the + * /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by + * hvcs_remove (which removes the target device and executes tty_hangup()) that + * tty_hangup will allow hvcs_write time to complete execution before it + * terminates our device. + */ +static int hvcs_write(struct tty_struct *tty, +		const unsigned char *buf, int count) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; +	unsigned int unit_address; +	const unsigned char *charbuf; +	unsigned long flags; +	int total_sent = 0; +	int tosend = 0; +	int result = 0; + +	/* +	 * If they don't check the return code off of their open they may +	 * attempt this even if there is no connected device. +	 */ +	if (!hvcsd) +		return -ENODEV; + +	/* Reasonable size to prevent user level flooding */ +	if (count > HVCS_MAX_FROM_USER) { +		printk(KERN_WARNING "HVCS write: count being truncated to" +				" HVCS_MAX_FROM_USER.\n"); +		count = HVCS_MAX_FROM_USER; +	} + +	charbuf = buf; + +	spin_lock_irqsave(&hvcsd->lock, flags); + +	/* +	 * Somehow an open succeeded but the device was removed or the +	 * connection terminated between the vty-server and partner vty during +	 * the middle of a write operation?  This is a crummy place to do this +	 * but we want to keep it all in the spinlock. 
+	 */ +	if (hvcsd->port.count <= 0) { +		spin_unlock_irqrestore(&hvcsd->lock, flags); +		return -ENODEV; +	} + +	unit_address = hvcsd->vdev->unit_address; + +	while (count > 0) { +		tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer)); +		/* +		 * No more space, this probably means that the last call to +		 * hvcs_write() didn't succeed and the buffer was filled up. +		 */ +		if (!tosend) +			break; + +		memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer], +				&charbuf[total_sent], +				tosend); + +		hvcsd->chars_in_buffer += tosend; + +		result = 0; + +		/* +		 * If this is true then we don't want to try writing to the +		 * hypervisor because that is the kernel_threads job now.  We'll +		 * just add to the buffer. +		 */ +		if (!(hvcsd->todo_mask & HVCS_TRY_WRITE)) +			/* won't send partial writes */ +			result = hvc_put_chars(unit_address, +					&hvcsd->buffer[0], +					hvcsd->chars_in_buffer); + +		/* +		 * Since we know we have enough room in hvcsd->buffer for +		 * tosend we record that it was sent regardless of whether the +		 * hypervisor actually took it because we have it buffered. +		 */ +		total_sent+=tosend; +		count-=tosend; +		if (result == 0) { +			hvcsd->todo_mask |= HVCS_TRY_WRITE; +			hvcs_kick(); +			break; +		} + +		hvcsd->chars_in_buffer = 0; +		/* +		 * Test after the chars_in_buffer reset otherwise this could +		 * deadlock our writes if hvc_put_chars fails. +		 */ +		if (result < 0) +			break; +	} + +	spin_unlock_irqrestore(&hvcsd->lock, flags); + +	if (result == -1) +		return -EIO; +	else +		return total_sent; +} + +/* + * This is really asking how much can we guarantee that we can send or that we + * absolutely WILL BUFFER if we can't send it.  This driver MUST honor the + * return value, hence the reason for hvcs_struct buffering. + */ +static int hvcs_write_room(struct tty_struct *tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; + +	if (!hvcsd || hvcsd->port.count <= 0) +		return 0; + +	return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; +} + +static int hvcs_chars_in_buffer(struct tty_struct *tty) +{ +	struct hvcs_struct *hvcsd = tty->driver_data; + +	return hvcsd->chars_in_buffer; +} + +static const struct tty_operations hvcs_ops = { +	.install = hvcs_install, +	.open = hvcs_open, +	.close = hvcs_close, +	.cleanup = hvcs_cleanup, +	.hangup = hvcs_hangup, +	.write = hvcs_write, +	.write_room = hvcs_write_room, +	.chars_in_buffer = hvcs_chars_in_buffer, +	.unthrottle = hvcs_unthrottle, +	.throttle = hvcs_throttle, +}; + +static int hvcs_alloc_index_list(int n) +{ +	int i; + +	hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL); +	if (!hvcs_index_list) +		return -ENOMEM; +	hvcs_index_count = n; +	for (i = 0; i < hvcs_index_count; i++) +		hvcs_index_list[i] = -1; +	return 0; +} + +static void hvcs_free_index_list(void) +{ +	/* Paranoia check to be thorough. */ +	kfree(hvcs_index_list); +	hvcs_index_list = NULL; +	hvcs_index_count = 0; +} + +static int hvcs_initialize(void) +{ +	int rc, num_ttys_to_alloc; + +	mutex_lock(&hvcs_init_mutex); +	if (hvcs_task) { +		mutex_unlock(&hvcs_init_mutex); +		return 0; +	} + +	/* Has the user specified an overload with an insmod param? 
*/
+	if (hvcs_parm_num_devs <= 0 ||
+		(hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
+		num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
+	} else
+		num_ttys_to_alloc = hvcs_parm_num_devs;
+
+	hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc);
+	if (!hvcs_tty_driver) {
+		mutex_unlock(&hvcs_init_mutex);
+		return -ENOMEM;
+	}
+
+	if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+		rc = -ENOMEM;
+		goto index_fail;
+	}
+
+	hvcs_tty_driver->driver_name = hvcs_driver_name;
+	hvcs_tty_driver->name = hvcs_device_node;
+
+	/*
+	 * We'll let the system assign us a major number, indicated by leaving
+	 * it blank.
+	 */
+
+	hvcs_tty_driver->minor_start = HVCS_MINOR_START;
+	hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+
+	/*
+	 * We roll our own so that we DON'T ECHO.  We can't echo because the
+	 * device we are connecting to already echoes by default and this would
+	 * throw us into a horrible recursive echo-echo-echo loop.
+	 */
+	hvcs_tty_driver->init_termios = hvcs_tty_termios;
+	hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
+
+	tty_set_operations(hvcs_tty_driver, &hvcs_ops);
+
+	/*
+	 * The following call will result in sysfs entries that denote the
+	 * dynamically assigned major and minor numbers for our devices.
+	 */
+	if (tty_register_driver(hvcs_tty_driver)) {
+		printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+		rc = -EIO;
+		goto register_fail;
+	}
+
+	hvcs_pi_buff = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (!hvcs_pi_buff) {
+		rc = -ENOMEM;
+		goto buff_alloc_fail;
+	}
+
+	hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
+	if (IS_ERR(hvcs_task)) {
+		printk(KERN_ERR "HVCS: khvcsd creation failed.\n");
+		rc = -EIO;
+		goto kthread_fail;
+	}
+	mutex_unlock(&hvcs_init_mutex);
+	return 0;
+
+kthread_fail:
+	free_page((unsigned long)hvcs_pi_buff);
+buff_alloc_fail:
+	tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+	hvcs_free_index_list();
+index_fail:
+	put_tty_driver(hvcs_tty_driver);
+	hvcs_tty_driver = NULL;
+	mutex_unlock(&hvcs_init_mutex);
+	return rc;
+}
+
+static int __init hvcs_module_init(void)
+{
+	int rc = vio_register_driver(&hvcs_vio_driver);
+	if (rc) {
+		printk(KERN_ERR "HVCS: can't register vio driver\n");
+		return rc;
+	}
+
+	pr_info("HVCS: Driver registered.\n");
+
+	/* This needs to be done AFTER the vio_register_driver() call or else
+	 * the kobjects won't be initialized properly.
+	 */
+	rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+	if (rc)
+		pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc);
+
+	return 0;
+}
+
+static void __exit hvcs_module_exit(void)
+{
+	/*
+	 * This driver receives hvcs_remove callbacks for each device upon
+	 * module removal.
+	 */
+	vio_unregister_driver(&hvcs_vio_driver);
+	if (!hvcs_task)
+		return;
+
+	/*
+	 * This synchronous operation will wake the khvcsd kthread if it is
+	 * asleep and will return when khvcsd has terminated. 
+	 */ +	kthread_stop(hvcs_task); + +	spin_lock(&hvcs_pi_lock); +	free_page((unsigned long)hvcs_pi_buff); +	hvcs_pi_buff = NULL; +	spin_unlock(&hvcs_pi_lock); + +	driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan); + +	tty_unregister_driver(hvcs_tty_driver); + +	hvcs_free_index_list(); + +	put_tty_driver(hvcs_tty_driver); + +	printk(KERN_INFO "HVCS: driver module removed.\n"); +} + +module_init(hvcs_module_init); +module_exit(hvcs_module_exit); diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c new file mode 100644 index 00000000000..41901997c0d --- /dev/null +++ b/drivers/tty/hvc/hvsi.c @@ -0,0 +1,1221 @@ +/* + * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA + */ + +/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS + * and the service processor on IBM pSeries servers. On these servers, there + * are no serial ports under the OS's control, and sometimes there is no other + * console available either. However, the service processor has two standard + * serial ports, so this over-complicated protocol allows the OS to control + * those ports by proxy. + * + * Besides data, the procotol supports the reading/writing of the serial + * port's DTR line, and the reading of the CD line. This is to allow the OS to + * control a modem attached to the service processor's serial port. Note that + * the OS cannot change the speed of the port through this protocol. + */ + +#undef DEBUG + +#include <linux/console.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/major.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/sysrq.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <asm/hvcall.h> +#include <asm/hvconsole.h> +#include <asm/prom.h> +#include <asm/uaccess.h> +#include <asm/vio.h> +#include <asm/param.h> +#include <asm/hvsi.h> + +#define HVSI_MAJOR	229 +#define HVSI_MINOR	128 +#define MAX_NR_HVSI_CONSOLES 4 + +#define HVSI_TIMEOUT (5*HZ) +#define HVSI_VERSION 1 +#define HVSI_MAX_PACKET 256 +#define HVSI_MAX_READ 16 +#define HVSI_MAX_OUTGOING_DATA 12 +#define N_OUTBUF 12 + +/* + * we pass data via two 8-byte registers, so we would like our char arrays + * properly aligned for those loads. + */ +#define __ALIGNED__	__attribute__((__aligned__(sizeof(long)))) + +struct hvsi_struct { +	struct tty_port port; +	struct delayed_work writer; +	struct work_struct handshaker; +	wait_queue_head_t emptyq; /* woken when outbuf is emptied */ +	wait_queue_head_t stateq; /* woken when HVSI state changes */ +	spinlock_t lock; +	int index; +	uint8_t throttle_buf[128]; +	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */ +	/* inbuf is for packet reassembly. 
leave a little room for leftovers. */ +	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ]; +	uint8_t *inbuf_end; +	int n_throttle; +	int n_outbuf; +	uint32_t vtermno; +	uint32_t virq; +	atomic_t seqno; /* HVSI packet sequence number */ +	uint16_t mctrl; +	uint8_t state;  /* HVSI protocol state */ +	uint8_t flags; +#ifdef CONFIG_MAGIC_SYSRQ +	uint8_t sysrq; +#endif /* CONFIG_MAGIC_SYSRQ */ +}; +static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES]; + +static struct tty_driver *hvsi_driver; +static int hvsi_count; +static int (*hvsi_wait)(struct hvsi_struct *hp, int state); + +enum HVSI_PROTOCOL_STATE { +	HVSI_CLOSED, +	HVSI_WAIT_FOR_VER_RESPONSE, +	HVSI_WAIT_FOR_VER_QUERY, +	HVSI_OPEN, +	HVSI_WAIT_FOR_MCTRL_RESPONSE, +	HVSI_FSP_DIED, +}; +#define HVSI_CONSOLE 0x1 + +static inline int is_console(struct hvsi_struct *hp) +{ +	return hp->flags & HVSI_CONSOLE; +} + +static inline int is_open(struct hvsi_struct *hp) +{ +	/* if we're waiting for an mctrl then we're already open */ +	return (hp->state == HVSI_OPEN) +			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE); +} + +static inline void print_state(struct hvsi_struct *hp) +{ +#ifdef DEBUG +	static const char *state_names[] = { +		"HVSI_CLOSED", +		"HVSI_WAIT_FOR_VER_RESPONSE", +		"HVSI_WAIT_FOR_VER_QUERY", +		"HVSI_OPEN", +		"HVSI_WAIT_FOR_MCTRL_RESPONSE", +		"HVSI_FSP_DIED", +	}; +	const char *name = (hp->state < ARRAY_SIZE(state_names)) +		? state_names[hp->state] : "UNKNOWN"; + +	pr_debug("hvsi%i: state = %s\n", hp->index, name); +#endif /* DEBUG */ +} + +static inline void __set_state(struct hvsi_struct *hp, int state) +{ +	hp->state = state; +	print_state(hp); +	wake_up_all(&hp->stateq); +} + +static inline void set_state(struct hvsi_struct *hp, int state) +{ +	unsigned long flags; + +	spin_lock_irqsave(&hp->lock, flags); +	__set_state(hp, state); +	spin_unlock_irqrestore(&hp->lock, flags); +} + +static inline int len_packet(const uint8_t *packet) +{ +	return (int)((struct hvsi_header *)packet)->len; +} + +static inline int is_header(const uint8_t *packet) +{ +	struct hvsi_header *header = (struct hvsi_header *)packet; +	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER; +} + +static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet) +{ +	if (hp->inbuf_end < packet + sizeof(struct hvsi_header)) +		return 0; /* don't even have the packet header */ + +	if (hp->inbuf_end < (packet + len_packet(packet))) +		return 0; /* don't have the rest of the packet */ + +	return 1; +} + +/* shift remaining bytes in packetbuf down */ +static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to) +{ +	int remaining = (int)(hp->inbuf_end - read_to); + +	pr_debug("%s: %i chars remain\n", __func__, remaining); + +	if (read_to != hp->inbuf) +		memmove(hp->inbuf, read_to, remaining); + +	hp->inbuf_end = hp->inbuf + remaining; +} + +#ifdef DEBUG +#define dbg_dump_packet(packet) dump_packet(packet) +#define dbg_dump_hex(data, len) dump_hex(data, len) +#else +#define dbg_dump_packet(packet) do { } while (0) +#define dbg_dump_hex(data, len) do { } while (0) +#endif + +static void dump_hex(const uint8_t *data, int len) +{ +	int i; + +	printk("    "); +	for (i=0; i < len; i++) +		printk("%.2x", data[i]); + +	printk("\n    "); +	for (i=0; i < len; i++) { +		if (isprint(data[i])) +			printk("%c", data[i]); +		else +			printk("."); +	} +	printk("\n"); +} + +static void dump_packet(uint8_t *packet) +{ +	struct hvsi_header *header = (struct hvsi_header *)packet; + +	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len, +			
header->seqno); + +	dump_hex(packet, header->len); +} + +static int hvsi_read(struct hvsi_struct *hp, char *buf, int count) +{ +	unsigned long got; + +	got = hvc_get_chars(hp->vtermno, buf, count); + +	return got; +} + +static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet, +	struct tty_struct *tty, struct hvsi_struct **to_handshake) +{ +	struct hvsi_control *header = (struct hvsi_control *)packet; + +	switch (header->verb) { +		case VSV_MODEM_CTL_UPDATE: +			if ((header->word & HVSI_TSCD) == 0) { +				/* CD went away; no more connection */ +				pr_debug("hvsi%i: CD dropped\n", hp->index); +				hp->mctrl &= TIOCM_CD; +				if (tty && !C_CLOCAL(tty)) +					tty_hangup(tty); +			} +			break; +		case VSV_CLOSE_PROTOCOL: +			pr_debug("hvsi%i: service processor came back\n", hp->index); +			if (hp->state != HVSI_CLOSED) { +				*to_handshake = hp; +			} +			break; +		default: +			printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ", +				hp->index); +			dump_packet(packet); +			break; +	} +} + +static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet) +{ +	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet; + +	switch (hp->state) { +		case HVSI_WAIT_FOR_VER_RESPONSE: +			__set_state(hp, HVSI_WAIT_FOR_VER_QUERY); +			break; +		case HVSI_WAIT_FOR_MCTRL_RESPONSE: +			hp->mctrl = 0; +			if (resp->u.mctrl_word & HVSI_TSDTR) +				hp->mctrl |= TIOCM_DTR; +			if (resp->u.mctrl_word & HVSI_TSCD) +				hp->mctrl |= TIOCM_CD; +			__set_state(hp, HVSI_OPEN); +			break; +		default: +			printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index); +			dump_packet(packet); +			break; +	} +} + +/* respond to service processor's version query */ +static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno) +{ +	struct hvsi_query_response packet __ALIGNED__; +	int wrote; + +	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER; +	packet.hdr.len = sizeof(struct hvsi_query_response); +	packet.hdr.seqno = atomic_inc_return(&hp->seqno); +	packet.verb = VSV_SEND_VERSION_NUMBER; +	packet.u.version = HVSI_VERSION; +	packet.query_seqno = query_seqno+1; + +	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); +	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len); + +	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len); +	if (wrote != packet.hdr.len) { +		printk(KERN_ERR "hvsi%i: couldn't send query response!\n", +			hp->index); +		return -EIO; +	} + +	return 0; +} + +static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet) +{ +	struct hvsi_query *query = (struct hvsi_query *)packet; + +	switch (hp->state) { +		case HVSI_WAIT_FOR_VER_QUERY: +			hvsi_version_respond(hp, query->hdr.seqno); +			__set_state(hp, HVSI_OPEN); +			break; +		default: +			printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index); +			dump_packet(packet); +			break; +	} +} + +static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len) +{ +	int i; + +	for (i=0; i < len; i++) { +		char c = buf[i]; +#ifdef CONFIG_MAGIC_SYSRQ +		if (c == '\0') { +			hp->sysrq = 1; +			continue; +		} else if (hp->sysrq) { +			handle_sysrq(c); +			hp->sysrq = 0; +			continue; +		} +#endif /* CONFIG_MAGIC_SYSRQ */ +		tty_insert_flip_char(&hp->port, c, 0); +	} +} + +/* + * We could get 252 bytes of data at once here. But the tty layer only + * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow + * it. 
Accordingly we won't send more than 128 bytes at a time to the flip + * buffer, which will give the tty buffer a chance to throttle us. Should the + * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be + * revisited. + */ +#define TTY_THRESHOLD_THROTTLE 128 +static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet) +{ +	const struct hvsi_header *header = (const struct hvsi_header *)packet; +	const uint8_t *data = packet + sizeof(struct hvsi_header); +	int datalen = header->len - sizeof(struct hvsi_header); +	int overflow = datalen - TTY_THRESHOLD_THROTTLE; + +	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data); + +	if (datalen == 0) +		return false; + +	if (overflow > 0) { +		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__); +		datalen = TTY_THRESHOLD_THROTTLE; +	} + +	hvsi_insert_chars(hp, data, datalen); + +	if (overflow > 0) { +		/* +		 * we still have more data to deliver, so we need to save off the +		 * overflow and send it later +		 */ +		pr_debug("%s: deferring overflow\n", __func__); +		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow); +		hp->n_throttle = overflow; +	} + +	return true; +} + +/* + * Returns true/false indicating data successfully read from hypervisor. + * Used both to get packets for tty connections and to advance the state + * machine during console handshaking (in which case tty = NULL and we ignore + * incoming data). + */ +static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty, +		struct hvsi_struct **handshake) +{ +	uint8_t *packet = hp->inbuf; +	int chunklen; +	bool flip = false; + +	*handshake = NULL; + +	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ); +	if (chunklen == 0) { +		pr_debug("%s: 0-length read\n", __func__); +		return 0; +	} + +	pr_debug("%s: got %i bytes\n", __func__, chunklen); +	dbg_dump_hex(hp->inbuf_end, chunklen); + +	hp->inbuf_end += chunklen; + +	/* handle all completed packets */ +	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) { +		struct hvsi_header *header = (struct hvsi_header *)packet; + +		if (!is_header(packet)) { +			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index); +			/* skip bytes until we find a header or run out of data */ +			while ((packet < hp->inbuf_end) && (!is_header(packet))) +				packet++; +			continue; +		} + +		pr_debug("%s: handling %i-byte packet\n", __func__, +				len_packet(packet)); +		dbg_dump_packet(packet); + +		switch (header->type) { +			case VS_DATA_PACKET_HEADER: +				if (!is_open(hp)) +					break; +				flip = hvsi_recv_data(hp, packet); +				break; +			case VS_CONTROL_PACKET_HEADER: +				hvsi_recv_control(hp, packet, tty, handshake); +				break; +			case VS_QUERY_RESPONSE_PACKET_HEADER: +				hvsi_recv_response(hp, packet); +				break; +			case VS_QUERY_PACKET_HEADER: +				hvsi_recv_query(hp, packet); +				break; +			default: +				printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n", +						hp->index, header->type); +				dump_packet(packet); +				break; +		} + +		packet += len_packet(packet); + +		if (*handshake) { +			pr_debug("%s: handshake\n", __func__); +			break; +		} +	} + +	compact_inbuf(hp, packet); + +	if (flip) +		tty_flip_buffer_push(&hp->port); + +	return 1; +} + +static void hvsi_send_overflow(struct hvsi_struct *hp) +{ +	pr_debug("%s: delivering %i bytes overflow\n", __func__, +			hp->n_throttle); + +	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle); +	hp->n_throttle = 0; +} + +/* + * must get all pending data because we only get an irq on 
empty->non-empty + * transition + */ +static irqreturn_t hvsi_interrupt(int irq, void *arg) +{ +	struct hvsi_struct *hp = (struct hvsi_struct *)arg; +	struct hvsi_struct *handshake; +	struct tty_struct *tty; +	unsigned long flags; +	int again = 1; + +	pr_debug("%s\n", __func__); + +	tty = tty_port_tty_get(&hp->port); + +	while (again) { +		spin_lock_irqsave(&hp->lock, flags); +		again = hvsi_load_chunk(hp, tty, &handshake); +		spin_unlock_irqrestore(&hp->lock, flags); + +		if (handshake) { +			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index); +			schedule_work(&handshake->handshaker); +		} +	} + +	spin_lock_irqsave(&hp->lock, flags); +	if (tty && hp->n_throttle && !test_bit(TTY_THROTTLED, &tty->flags)) { +		/* we weren't hung up and we weren't throttled, so we can +		 * deliver the rest now */ +		hvsi_send_overflow(hp); +		tty_flip_buffer_push(&hp->port); +	} +	spin_unlock_irqrestore(&hp->lock, flags); + +	tty_kref_put(tty); + +	return IRQ_HANDLED; +} + +/* for boot console, before the irq handler is running */ +static int __init poll_for_state(struct hvsi_struct *hp, int state) +{ +	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT; + +	for (;;) { +		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */ + +		if (hp->state == state) +			return 0; + +		mdelay(5); +		if (time_after(jiffies, end_jiffies)) +			return -EIO; +	} +} + +/* wait for irq handler to change our state */ +static int wait_for_state(struct hvsi_struct *hp, int state) +{ +	int ret = 0; + +	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT)) +		ret = -EIO; + +	return ret; +} + +static int hvsi_query(struct hvsi_struct *hp, uint16_t verb) +{ +	struct hvsi_query packet __ALIGNED__; +	int wrote; + +	packet.hdr.type = VS_QUERY_PACKET_HEADER; +	packet.hdr.len = sizeof(struct hvsi_query); +	packet.hdr.seqno = atomic_inc_return(&hp->seqno); +	packet.verb = verb; + +	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); +	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len); + +	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len); +	if (wrote != packet.hdr.len) { +		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index, +			wrote); +		return -EIO; +	} + +	return 0; +} + +static int hvsi_get_mctrl(struct hvsi_struct *hp) +{ +	int ret; + +	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE); +	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS); + +	ret = hvsi_wait(hp, HVSI_OPEN); +	if (ret < 0) { +		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index); +		set_state(hp, HVSI_OPEN); +		return ret; +	} + +	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl); + +	return 0; +} + +/* note that we can only set DTR */ +static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl) +{ +	struct hvsi_control packet __ALIGNED__; +	int wrote; + +	packet.hdr.type = VS_CONTROL_PACKET_HEADER, +	packet.hdr.seqno = atomic_inc_return(&hp->seqno); +	packet.hdr.len = sizeof(struct hvsi_control); +	packet.verb = VSV_SET_MODEM_CTL; +	packet.mask = HVSI_TSDTR; + +	if (mctrl & TIOCM_DTR) +		packet.word = HVSI_TSDTR; + +	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); +	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len); + +	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len); +	if (wrote != packet.hdr.len) { +		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index); +		return -EIO; +	} + +	return 0; +} + +static void hvsi_drain_input(struct hvsi_struct *hp) +{ +	uint8_t buf[HVSI_MAX_READ] __ALIGNED__; +	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT; + +	
while (time_before(end_jiffies, jiffies)) +		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ)) +			break; +} + +static int hvsi_handshake(struct hvsi_struct *hp) +{ +	int ret; + +	/* +	 * We could have a CLOSE or other data waiting for us before we even try +	 * to open; try to throw it all away so we don't get confused. (CLOSE +	 * is the first message sent up the pipe when the FSP comes online. We +	 * need to distinguish between "it came up a while ago and we're the first +	 * user" and "it was just reset before it saw our handshake packet".) +	 */ +	hvsi_drain_input(hp); + +	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE); +	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER); +	if (ret < 0) { +		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index); +		return ret; +	} + +	ret = hvsi_wait(hp, HVSI_OPEN); +	if (ret < 0) +		return ret; + +	return 0; +} + +static void hvsi_handshaker(struct work_struct *work) +{ +	struct hvsi_struct *hp = +		container_of(work, struct hvsi_struct, handshaker); + +	if (hvsi_handshake(hp) >= 0) +		return; + +	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index); +	if (is_console(hp)) { +		/* +		 * ttys will re-attempt the handshake via hvsi_open, but +		 * the console will not. +		 */ +		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index); +	} +} + +static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count) +{ +	struct hvsi_data packet __ALIGNED__; +	int ret; + +	BUG_ON(count > HVSI_MAX_OUTGOING_DATA); + +	packet.hdr.type = VS_DATA_PACKET_HEADER; +	packet.hdr.seqno = atomic_inc_return(&hp->seqno); +	packet.hdr.len = count + sizeof(struct hvsi_header); +	memcpy(&packet.data, buf, count); + +	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len); +	if (ret == packet.hdr.len) { +		/* return the number of chars written, not the packet length */ +		return count; +	} +	return ret; /* return any errors */ +} + +static void hvsi_close_protocol(struct hvsi_struct *hp) +{ +	struct hvsi_control packet __ALIGNED__; + +	packet.hdr.type = VS_CONTROL_PACKET_HEADER; +	packet.hdr.seqno = atomic_inc_return(&hp->seqno); +	packet.hdr.len = 6; +	packet.verb = VSV_CLOSE_PROTOCOL; + +	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); +	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len); + +	hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len); +} + +static int hvsi_open(struct tty_struct *tty, struct file *filp) +{ +	struct hvsi_struct *hp; +	unsigned long flags; +	int ret; + +	pr_debug("%s\n", __func__); + +	hp = &hvsi_ports[tty->index]; + +	tty->driver_data = hp; + +	mb(); +	if (hp->state == HVSI_FSP_DIED) +		return -EIO; + +	tty_port_tty_set(&hp->port, tty); +	spin_lock_irqsave(&hp->lock, flags); +	hp->port.count++; +	atomic_set(&hp->seqno, 0); +	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); +	spin_unlock_irqrestore(&hp->lock, flags); + +	if (is_console(hp)) +		return 0; /* this has already been handshaked as the console */ + +	ret = hvsi_handshake(hp); +	if (ret < 0) { +		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name); +		return ret; +	} + +	ret = hvsi_get_mctrl(hp); +	if (ret < 0) { +		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name); +		return ret; +	} + +	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR); +	if (ret < 0) { +		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name); +		return ret; +	} + +	return 0; +} + +/* wait for hvsi_write_worker to empty hp->outbuf */ +static void hvsi_flush_output(struct hvsi_struct *hp) +{ +	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT); 
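+	/*
+	 * Both the wait above and the cancel/flush calls below can sleep, so
+	 * hvsi_close() drops hp->lock before calling in here.
+	 */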
+ +	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */ +	cancel_delayed_work_sync(&hp->writer); +	flush_work(&hp->handshaker); + +	/* +	 * it's also possible that our timeout expired and hvsi_write_worker +	 * didn't manage to push outbuf. poof. +	 */ +	hp->n_outbuf = 0; +} + +static void hvsi_close(struct tty_struct *tty, struct file *filp) +{ +	struct hvsi_struct *hp = tty->driver_data; +	unsigned long flags; + +	pr_debug("%s\n", __func__); + +	if (tty_hung_up_p(filp)) +		return; + +	spin_lock_irqsave(&hp->lock, flags); + +	if (--hp->port.count == 0) { +		tty_port_tty_set(&hp->port, NULL); +		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */ + +		/* only close down connection if it is not the console */ +		if (!is_console(hp)) { +			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */ +			__set_state(hp, HVSI_CLOSED); +			/* +			 * any data delivered to the tty layer after this will be +			 * discarded (except for XON/XOFF) +			 */ +			tty->closing = 1; + +			spin_unlock_irqrestore(&hp->lock, flags); + +			/* let any existing irq handlers finish. no more will start. */ +			synchronize_irq(hp->virq); + +			/* hvsi_write_worker will re-schedule until outbuf is empty. */ +			hvsi_flush_output(hp); + +			/* tell FSP to stop sending data */ +			hvsi_close_protocol(hp); + +			/* +			 * drain anything FSP is still in the middle of sending, and let +			 * hvsi_handshake drain the rest on the next open. +			 */ +			hvsi_drain_input(hp); + +			spin_lock_irqsave(&hp->lock, flags); +		} +	} else if (hp->port.count < 0) +		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n", +		       hp - hvsi_ports, hp->port.count); + +	spin_unlock_irqrestore(&hp->lock, flags); +} + +static void hvsi_hangup(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; +	unsigned long flags; + +	pr_debug("%s\n", __func__); + +	tty_port_tty_set(&hp->port, NULL); + +	spin_lock_irqsave(&hp->lock, flags); +	hp->port.count = 0; +	hp->n_outbuf = 0; +	spin_unlock_irqrestore(&hp->lock, flags); +} + +/* called with hp->lock held */ +static void hvsi_push(struct hvsi_struct *hp) +{ +	int n; + +	if (hp->n_outbuf <= 0) +		return; + +	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); +	if (n > 0) { +		/* success */ +		pr_debug("%s: wrote %i chars\n", __func__, n); +		hp->n_outbuf = 0; +	} else if (n == -EIO) { +		__set_state(hp, HVSI_FSP_DIED); +		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index); +	} +} + +/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ +static void hvsi_write_worker(struct work_struct *work) +{ +	struct hvsi_struct *hp = +		container_of(work, struct hvsi_struct, writer.work); +	unsigned long flags; +#ifdef DEBUG +	static long start_j = 0; + +	if (start_j == 0) +		start_j = jiffies; +#endif /* DEBUG */ + +	spin_lock_irqsave(&hp->lock, flags); + +	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); + +	if (!is_open(hp)) { +		/* +		 * We could have a non-open connection if the service processor died +		 * while we were busily scheduling ourselves. In that case, it could +		 * be minutes before the service processor comes back, so only try +		 * again once a second. 
+		 */ +		schedule_delayed_work(&hp->writer, HZ); +		goto out; +	} + +	hvsi_push(hp); +	if (hp->n_outbuf > 0) +		schedule_delayed_work(&hp->writer, 10); +	else { +#ifdef DEBUG +		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__, +				jiffies - start_j); +		start_j = 0; +#endif /* DEBUG */ +		wake_up_all(&hp->emptyq); +		tty_port_tty_wakeup(&hp->port); +	} + +out: +	spin_unlock_irqrestore(&hp->lock, flags); +} + +static int hvsi_write_room(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; + +	return N_OUTBUF - hp->n_outbuf; +} + +static int hvsi_chars_in_buffer(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; + +	return hp->n_outbuf; +} + +static int hvsi_write(struct tty_struct *tty, +		     const unsigned char *buf, int count) +{ +	struct hvsi_struct *hp = tty->driver_data; +	const char *source = buf; +	unsigned long flags; +	int total = 0; +	int origcount = count; + +	spin_lock_irqsave(&hp->lock, flags); + +	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); + +	if (!is_open(hp)) { +		/* we're either closing or not yet open; don't accept data */ +		pr_debug("%s: not open\n", __func__); +		goto out; +	} + +	/* +	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf +	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls +	 * will see there is no room in outbuf and return. +	 */ +	while ((count > 0) && (hvsi_write_room(tty) > 0)) { +		int chunksize = min(count, hvsi_write_room(tty)); + +		BUG_ON(hp->n_outbuf < 0); +		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize); +		hp->n_outbuf += chunksize; + +		total += chunksize; +		source += chunksize; +		count -= chunksize; +		hvsi_push(hp); +	} + +	if (hp->n_outbuf > 0) { +		/* +		 * we weren't able to write it all to the hypervisor. +		 * schedule another push attempt. +		 */ +		schedule_delayed_work(&hp->writer, 10); +	} + +out: +	spin_unlock_irqrestore(&hp->lock, flags); + +	if (total != origcount) +		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount, +			total); + +	return total; +} + +/* + * I have never seen throttle or unthrottle called, so this little throttle + * buffering scheme may or may not work. 
+ */ +static void hvsi_throttle(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; + +	pr_debug("%s\n", __func__); + +	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); +} + +static void hvsi_unthrottle(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; +	unsigned long flags; + +	pr_debug("%s\n", __func__); + +	spin_lock_irqsave(&hp->lock, flags); +	if (hp->n_throttle) { +		hvsi_send_overflow(hp); +		tty_flip_buffer_push(&hp->port); +	} +	spin_unlock_irqrestore(&hp->lock, flags); + + +	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); +} + +static int hvsi_tiocmget(struct tty_struct *tty) +{ +	struct hvsi_struct *hp = tty->driver_data; + +	hvsi_get_mctrl(hp); +	return hp->mctrl; +} + +static int hvsi_tiocmset(struct tty_struct *tty, +				unsigned int set, unsigned int clear) +{ +	struct hvsi_struct *hp = tty->driver_data; +	unsigned long flags; +	uint16_t new_mctrl; + +	/* we can only alter DTR */ +	clear &= TIOCM_DTR; +	set &= TIOCM_DTR; + +	spin_lock_irqsave(&hp->lock, flags); + +	new_mctrl = (hp->mctrl & ~clear) | set; + +	if (hp->mctrl != new_mctrl) { +		hvsi_set_mctrl(hp, new_mctrl); +		hp->mctrl = new_mctrl; +	} +	spin_unlock_irqrestore(&hp->lock, flags); + +	return 0; +} + + +static const struct tty_operations hvsi_ops = { +	.open = hvsi_open, +	.close = hvsi_close, +	.write = hvsi_write, +	.hangup = hvsi_hangup, +	.write_room = hvsi_write_room, +	.chars_in_buffer = hvsi_chars_in_buffer, +	.throttle = hvsi_throttle, +	.unthrottle = hvsi_unthrottle, +	.tiocmget = hvsi_tiocmget, +	.tiocmset = hvsi_tiocmset, +}; + +static int __init hvsi_init(void) +{ +	int i; + +	hvsi_driver = alloc_tty_driver(hvsi_count); +	if (!hvsi_driver) +		return -ENOMEM; + +	hvsi_driver->driver_name = "hvsi"; +	hvsi_driver->name = "hvsi"; +	hvsi_driver->major = HVSI_MAJOR; +	hvsi_driver->minor_start = HVSI_MINOR; +	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM; +	hvsi_driver->init_termios = tty_std_termios; +	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; +	hvsi_driver->init_termios.c_ispeed = 9600; +	hvsi_driver->init_termios.c_ospeed = 9600; +	hvsi_driver->flags = TTY_DRIVER_REAL_RAW; +	tty_set_operations(hvsi_driver, &hvsi_ops); + +	for (i=0; i < hvsi_count; i++) { +		struct hvsi_struct *hp = &hvsi_ports[i]; +		int ret = 1; + +		tty_port_link_device(&hp->port, hvsi_driver, i); + +		ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp); +		if (ret) +			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n", +				hp->virq, ret); +	} +	hvsi_wait = wait_for_state; /* irqs active now */ + +	if (tty_register_driver(hvsi_driver)) +		panic("Couldn't register hvsi console driver\n"); + +	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count); + +	return 0; +} +device_initcall(hvsi_init); + +/***** console (not tty) code: *****/ + +static void hvsi_console_print(struct console *console, const char *buf, +		unsigned int count) +{ +	struct hvsi_struct *hp = &hvsi_ports[console->index]; +	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__; +	unsigned int i = 0, n = 0; +	int ret, donecr = 0; + +	mb(); +	if (!is_open(hp)) +		return; + +	/* +	 * ugh, we have to translate LF -> CRLF ourselves, in place. 
+	 * copied from hvc_console.c: +	 */ +	while (count > 0 || i > 0) { +		if (count > 0 && i < sizeof(c)) { +			if (buf[n] == '\n' && !donecr) { +				c[i++] = '\r'; +				donecr = 1; +			} else { +				c[i++] = buf[n++]; +				donecr = 0; +				--count; +			} +		} else { +			ret = hvsi_put_chars(hp, c, i); +			if (ret < 0) +				i = 0; +			i -= ret; +		} +	} +} + +static struct tty_driver *hvsi_console_device(struct console *console, +	int *index) +{ +	*index = console->index; +	return hvsi_driver; +} + +static int __init hvsi_console_setup(struct console *console, char *options) +{ +	struct hvsi_struct *hp; +	int ret; + +	if (console->index < 0 || console->index >= hvsi_count) +		return -1; +	hp = &hvsi_ports[console->index]; + +	/* give the FSP a chance to change the baud rate when we re-open */ +	hvsi_close_protocol(hp); + +	ret = hvsi_handshake(hp); +	if (ret < 0) +		return ret; + +	ret = hvsi_get_mctrl(hp); +	if (ret < 0) +		return ret; + +	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR); +	if (ret < 0) +		return ret; + +	hp->flags |= HVSI_CONSOLE; + +	return 0; +} + +static struct console hvsi_console = { +	.name		= "hvsi", +	.write		= hvsi_console_print, +	.device		= hvsi_console_device, +	.setup		= hvsi_console_setup, +	.flags		= CON_PRINTBUFFER, +	.index		= -1, +}; + +static int __init hvsi_console_init(void) +{ +	struct device_node *vty; + +	hvsi_wait = poll_for_state; /* no irqs yet; must poll */ + +	/* search device tree for vty nodes */ +	for_each_compatible_node(vty, "serial", "hvterm-protocol") { +		struct hvsi_struct *hp; +		const uint32_t *vtermno, *irq; + +		vtermno = of_get_property(vty, "reg", NULL); +		irq = of_get_property(vty, "interrupts", NULL); +		if (!vtermno || !irq) +			continue; + +		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) { +			of_node_put(vty); +			break; +		} + +		hp = &hvsi_ports[hvsi_count]; +		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); +		INIT_WORK(&hp->handshaker, hvsi_handshaker); +		init_waitqueue_head(&hp->emptyq); +		init_waitqueue_head(&hp->stateq); +		spin_lock_init(&hp->lock); +		tty_port_init(&hp->port); +		hp->index = hvsi_count; +		hp->inbuf_end = hp->inbuf; +		hp->state = HVSI_CLOSED; +		hp->vtermno = *vtermno; +		hp->virq = irq_create_mapping(NULL, irq[0]); +		if (hp->virq == 0) { +			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", +				__func__, irq[0]); +			tty_port_destroy(&hp->port); +			continue; +		} + +		hvsi_count++; +	} + +	if (hvsi_count) +		register_console(&hvsi_console); +	return 0; +} +console_initcall(hvsi_console_init); diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c new file mode 100644 index 00000000000..7ae6c293e51 --- /dev/null +++ b/drivers/tty/hvc/hvsi_lib.c @@ -0,0 +1,424 @@ +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/console.h> +#include <asm/hvsi.h> + +#include "hvc_console.h" + +static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet) +{ +	packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno)); + +	/* Assumes that always succeeds, works in practice */ +	return pv->put_chars(pv->termno, (char *)packet, packet->len); +} + +static void hvsi_start_handshake(struct hvsi_priv *pv) +{ +	struct hvsi_query q; + +	/* Reset state */ +	pv->established = 0; +	atomic_set(&pv->seqno, 0); + +	pr_devel("HVSI@%x: Handshaking started\n", pv->termno); + +	/* Send version query */ +	q.hdr.type = VS_QUERY_PACKET_HEADER; +	q.hdr.len = sizeof(struct hvsi_query); +	q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER); +	
hvsi_send_packet(pv, &q.hdr); +} + +static int hvsi_send_close(struct hvsi_priv *pv) +{ +	struct hvsi_control ctrl; + +	pv->established = 0; + +	ctrl.hdr.type = VS_CONTROL_PACKET_HEADER; +	ctrl.hdr.len = sizeof(struct hvsi_control); +	ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL); +	return hvsi_send_packet(pv, &ctrl.hdr); +} + +static void hvsi_cd_change(struct hvsi_priv *pv, int cd) +{ +	if (cd) +		pv->mctrl |= TIOCM_CD; +	else { +		pv->mctrl &= ~TIOCM_CD; + +		/* We copy the existing hvsi driver semantics +		 * here which are to trigger a hangup when +		 * we get a carrier loss. +		 * Closing our connection to the server will +		 * do just that. +		 */ +		if (!pv->is_console && pv->opened) { +			pr_devel("HVSI@%x Carrier lost, hanging up !\n", +				 pv->termno); +			hvsi_send_close(pv); +		} +	} +} + +static void hvsi_got_control(struct hvsi_priv *pv) +{ +	struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf; + +	switch (be16_to_cpu(pkt->verb)) { +	case VSV_CLOSE_PROTOCOL: +		/* We restart the handshaking */ +		hvsi_start_handshake(pv); +		break; +	case VSV_MODEM_CTL_UPDATE: +		/* Transition of carrier detect */ +		hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD); +		break; +	} +} + +static void hvsi_got_query(struct hvsi_priv *pv) +{ +	struct hvsi_query *pkt = (struct hvsi_query *)pv->inbuf; +	struct hvsi_query_response r; + +	/* We only handle version queries */ +	if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER) +		return; + +	pr_devel("HVSI@%x: Got version query, sending response...\n", +		 pv->termno); + +	/* Send version response */ +	r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER; +	r.hdr.len = sizeof(struct hvsi_query_response); +	r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER); +	r.u.version = HVSI_VERSION; +	r.query_seqno = pkt->hdr.seqno; +	hvsi_send_packet(pv, &r.hdr); + +	/* Assume protocol is open now */ +	pv->established = 1; +} + +static void hvsi_got_response(struct hvsi_priv *pv) +{ +	struct hvsi_query_response *r = +		(struct hvsi_query_response *)pv->inbuf; + +	switch(r->verb) { +	case VSV_SEND_MODEM_CTL_STATUS: +		hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD); +		pv->mctrl_update = 1; +		break; +	} +} + +static int hvsi_check_packet(struct hvsi_priv *pv) +{ +	u8 len, type; + +	/* Check header validity. If it's invalid, we ditch +	 * the whole buffer and hope we eventually resync +	 */ +	if (pv->inbuf[0] < 0xfc) { +		pv->inbuf_len = pv->inbuf_pktlen = 0; +		return 0; +	} +	type = pv->inbuf[0]; +	len = pv->inbuf[1]; + +	/* Packet incomplete ? */ +	if (pv->inbuf_len < len) +		return 0; + +	pr_devel("HVSI@%x: Got packet type %x len %d bytes:\n", +		 pv->termno, type, len); + +	/* We have a packet, yay ! 
Handle it */ +	switch(type) { +	case VS_DATA_PACKET_HEADER: +		pv->inbuf_pktlen = len - 4; +		pv->inbuf_cur = 4; +		return 1; +	case VS_CONTROL_PACKET_HEADER: +		hvsi_got_control(pv); +		break; +	case VS_QUERY_PACKET_HEADER: +		hvsi_got_query(pv); +		break; +	case VS_QUERY_RESPONSE_PACKET_HEADER: +		hvsi_got_response(pv); +		break; +	} + +	/* Swallow packet and retry */ +	pv->inbuf_len -= len; +	memmove(pv->inbuf, &pv->inbuf[len], pv->inbuf_len); +	return 1; +} + +static int hvsi_get_packet(struct hvsi_priv *pv) +{ +	/* If we have room in the buffer, ask HV for more */ +	if (pv->inbuf_len < HVSI_INBUF_SIZE) +		pv->inbuf_len += pv->get_chars(pv->termno, +					     &pv->inbuf[pv->inbuf_len], +					     HVSI_INBUF_SIZE - pv->inbuf_len); +	/* +	 * If we have at least 4 bytes in the buffer, check for +	 * a full packet and retry +	 */ +	if (pv->inbuf_len >= 4) +		return hvsi_check_packet(pv); +	return 0; +} + +int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count) +{ +	unsigned int tries, read = 0; + +	if (WARN_ON(!pv)) +		return -ENXIO; + +	/* If we aren't open, don't do anything in order to avoid races +	 * with connection establishment. The hvc core will call this +	 * before we have returned from notifier_add(), and we need to +	 * avoid multiple users playing with the receive buffer +	 */ +	if (!pv->opened) +		return 0; + +	/* We try twice, once with what data we have and once more +	 * after we try to fetch some more from the hypervisor +	 */ +	for (tries = 1; count && tries < 2; tries++) { +		/* Consume existing data packet */ +		if (pv->inbuf_pktlen) { +			unsigned int l = min(count, (int)pv->inbuf_pktlen); +			memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l); +			pv->inbuf_cur += l; +			pv->inbuf_pktlen -= l; +			count -= l; +			read += l; +		} +		if (count == 0) +			break; + +		/* Data packet fully consumed, move down remaning data */ +		if (pv->inbuf_cur) { +			pv->inbuf_len -= pv->inbuf_cur; +			memmove(pv->inbuf, &pv->inbuf[pv->inbuf_cur], +				pv->inbuf_len); +			pv->inbuf_cur = 0; +		} + +		/* Try to get another packet */ +		if (hvsi_get_packet(pv)) +			tries--; +	} +	if (!pv->established) { +		pr_devel("HVSI@%x: returning -EPIPE\n", pv->termno); +		return -EPIPE; +	} +	return read; +} + +int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count) +{ +	struct hvsi_data dp; +	int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA); + +	if (WARN_ON(!pv)) +		return -ENODEV; + +	dp.hdr.type = VS_DATA_PACKET_HEADER; +	dp.hdr.len = adjcount + sizeof(struct hvsi_header); +	memcpy(dp.data, buf, adjcount); +	rc = hvsi_send_packet(pv, &dp.hdr); +	if (rc <= 0) +		return rc; +	return adjcount; +} + +static void maybe_msleep(unsigned long ms) +{ +	/* During early boot, IRQs are disabled, use mdelay */ +	if (irqs_disabled()) +		mdelay(ms); +	else +		msleep(ms); +} + +int hvsilib_read_mctrl(struct hvsi_priv *pv) +{ +	struct hvsi_query q; +	int rc, timeout; + +	pr_devel("HVSI@%x: Querying modem control status...\n", +		 pv->termno); + +	pv->mctrl_update = 0; +	q.hdr.type = VS_QUERY_PACKET_HEADER; +	q.hdr.len = sizeof(struct hvsi_query); +	q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS); +	rc = hvsi_send_packet(pv, &q.hdr); +	if (rc <= 0) { +		pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc); +		return rc; +	} + +	/* Try for up to 200ms */ +	for (timeout = 0; timeout < 20; timeout++) { +		if (!pv->established) +			return -ENXIO; +		if (pv->mctrl_update) +			return 0; +		if (!hvsi_get_packet(pv)) +			maybe_msleep(10); +	} +	return -EIO; +} + +int hvsilib_write_mctrl(struct 
hvsi_priv *pv, int dtr) +{ +	struct hvsi_control ctrl; +	unsigned short mctrl; + +	mctrl = pv->mctrl; +	if (dtr) +		mctrl |= TIOCM_DTR; +	else +		mctrl &= ~TIOCM_DTR; +	if (mctrl == pv->mctrl) +		return 0; +	pv->mctrl = mctrl; + +	pr_devel("HVSI@%x: %s DTR...\n", pv->termno, +		 dtr ? "Setting" : "Clearing"); + +	ctrl.hdr.type = VS_CONTROL_PACKET_HEADER, +	ctrl.hdr.len = sizeof(struct hvsi_control); +	ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL); +	ctrl.mask = cpu_to_be32(HVSI_TSDTR); +	ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0); +	return hvsi_send_packet(pv, &ctrl.hdr); +} + +void hvsilib_establish(struct hvsi_priv *pv) +{ +	int timeout; + +	pr_devel("HVSI@%x: Establishing...\n", pv->termno); + +	/* Try for up to 200ms, there can be a packet to +	 * start the process waiting for us... +	 */ +	for (timeout = 0; timeout < 20; timeout++) { +		if (pv->established) +			goto established; +		if (!hvsi_get_packet(pv)) +			maybe_msleep(10); +	} + +	/* Failed, send a close connection packet just +	 * in case +	 */ +	pr_devel("HVSI@%x:   ... sending close\n", pv->termno); + +	hvsi_send_close(pv); + +	/* Then restart handshake */ + +	pr_devel("HVSI@%x:   ... restarting handshake\n", pv->termno); + +	hvsi_start_handshake(pv); + +	pr_devel("HVSI@%x:   ... waiting handshake\n", pv->termno); + +	/* Try for up to 400ms */ +	for (timeout = 0; timeout < 40; timeout++) { +		if (pv->established) +			goto established; +		if (!hvsi_get_packet(pv)) +			maybe_msleep(10); +	} + +	if (!pv->established) { +		pr_devel("HVSI@%x: Timeout handshaking, giving up !\n", +			 pv->termno); +		return; +	} + established: +	/* Query modem control lines */ + +	pr_devel("HVSI@%x:   ... established, reading mctrl\n", pv->termno); + +	hvsilib_read_mctrl(pv); + +	/* Set our own DTR */ + +	pr_devel("HVSI@%x:   ... setting mctrl\n", pv->termno); + +	hvsilib_write_mctrl(pv, 1); + +	/* Set the opened flag so reads are allowed */ +	wmb(); +	pv->opened = 1; +} + +int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp) +{ +	pr_devel("HVSI@%x: open !\n", pv->termno); + +	/* Keep track of the tty data structure */ +	pv->tty = tty_port_tty_get(&hp->port); + +	hvsilib_establish(pv); + +	return 0; +} + +void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp) +{ +	unsigned long flags; + +	pr_devel("HVSI@%x: close !\n", pv->termno); + +	if (!pv->is_console) { +		pr_devel("HVSI@%x: Not a console, tearing down\n", +			 pv->termno); + +		/* Clear opened, synchronize with khvcd */ +		spin_lock_irqsave(&hp->lock, flags); +		pv->opened = 0; +		spin_unlock_irqrestore(&hp->lock, flags); + +		/* Clear our own DTR */ +		if (!pv->tty || (pv->tty->termios.c_cflag & HUPCL)) +			hvsilib_write_mctrl(pv, 0); + +		/* Tear down the connection */ +		hvsi_send_close(pv); +	} + +	if (pv->tty) +		tty_kref_put(pv->tty); +	pv->tty = NULL; +} + +void hvsilib_init(struct hvsi_priv *pv, +		  int (*get_chars)(uint32_t termno, char *buf, int count), +		  int (*put_chars)(uint32_t termno, const char *buf, +				   int count), +		  int termno, int is_console) +{ +	memset(pv, 0, sizeof(*pv)); +	pv->get_chars = get_chars; +	pv->put_chars = put_chars; +	pv->termno = termno; +	pv->is_console = is_console; +}  | 
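Usage note (editorial): the sketch below is an illustration only, not part of this
commit, of how a console back-end is expected to wire up the hvsilib helpers above.
The in-tree users are hvc_vio.c and hvc_opal.c; the sketch assumes the pseries
hvc_get_chars()/hvc_put_chars() firmware accessors, and the my_hvsi* names are
hypothetical.

/*
 * Illustrative sketch only.  A back-end owns one struct hvsi_priv, binds its
 * raw get/put accessors to it, runs the HVSI handshake, and then routes the
 * hvc core callbacks through hvsilib_get_chars()/hvsilib_put_chars(), which
 * strip and add the HVSI packet framing.
 */
#include <asm/hvconsole.h>	/* hvc_get_chars() / hvc_put_chars() */
#include <asm/hvsi.h>

static struct hvsi_priv my_hvsi;	/* hypothetical back-end state */

static void my_hvsi_setup(uint32_t termno, int is_console)
{
	/* Bind the raw firmware channel to the HVSI state machine. */
	hvsilib_init(&my_hvsi, hvc_get_chars, hvc_put_chars,
		     termno, is_console);

	/* Version/modem-control handshake; sets ->opened on success. */
	hvsilib_establish(&my_hvsi);
}

/* hvc core get_chars callback: returns de-framed payload bytes only. */
static int my_hvsi_get(uint32_t termno, char *buf, int count)
{
	return hvsilib_get_chars(&my_hvsi, buf, count);
}

/* hvc core put_chars callback: sends at most 12 payload bytes per packet. */
static int my_hvsi_put(uint32_t termno, const char *buf, int count)
{
	return hvsilib_put_chars(&my_hvsi, buf, count);
}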
