Diffstat (limited to 'fs')
-rw-r--r-- | fs/binfmt_elf.c          |  30
-rw-r--r-- | fs/binfmt_misc.c         |   3
-rw-r--r-- | fs/block_dev.c           |   3
-rw-r--r-- | fs/eventpoll.c           |  45
-rw-r--r-- | fs/fat/namei_vfat.c      |  83
-rw-r--r-- | fs/notify/notification.c |   3
-rw-r--r-- | fs/pipe.c                |   1
-rw-r--r-- | fs/proc/array.c          | 119
-rw-r--r-- | fs/proc/kcore.c          |   6
-rw-r--r-- | fs/proc/namespaces.c     |   2
-rw-r--r-- | fs/proc/stat.c           |  62
-rw-r--r-- | fs/select.c              |  40
-rw-r--r-- | fs/seq_file.c            |  84
13 files changed, 305 insertions, 176 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 81878b78c9d..504b6eee50a 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1093,6 +1093,29 @@ out: */ /* + * The purpose of always_dump_vma() is to make sure that special kernel mappings + * that are useful for post-mortem analysis are included in every core dump. + * In that way we ensure that the core dump is fully interpretable later + * without matching up the same kernel and hardware config to see what PC values + * meant. These special mappings include - vDSO, vsyscall, and other + * architecture specific mappings + */ +static bool always_dump_vma(struct vm_area_struct *vma) +{ + /* Any vsyscall mappings? */ + if (vma == get_gate_vma(vma->vm_mm)) + return true; + /* + * arch_vma_name() returns non-NULL for special architecture mappings, + * such as vDSO sections. + */ + if (arch_vma_name(vma)) + return true; + + return false; +} + +/* * Decide what to dump of a segment, part, all or none. */ static unsigned long vma_dump_size(struct vm_area_struct *vma, @@ -1100,10 +1123,13 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, { #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) - /* The vma can be set up to tell us the answer directly. */ - if (vma->vm_flags & VM_ALWAYSDUMP) + /* always dump the vdso and vsyscall sections */ + if (always_dump_vma(vma)) goto whole; + if (vma->vm_flags & VM_NODUMP) + return 0; + /* Hugetlb memory check */ if (vma->vm_flags & VM_HUGETLB) { if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED)) diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 1ffb60355ca..613aa061823 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/magic.h> #include <linux/binfmts.h> #include <linux/slab.h> #include <linux/ctype.h> @@ -699,7 +700,7 @@ static int bm_fill_super(struct super_block * sb, void * data, int silent) [3] = {"register", &bm_register_operations, S_IWUSR}, /* last one */ {""} }; - int err = simple_fill_super(sb, 0x42494e4d, bm_files); + int err = simple_fill_super(sb, BINFMTFS_MAGIC, bm_files); if (!err) sb->s_op = &s_ops; return err; diff --git a/fs/block_dev.c b/fs/block_dev.c index a9ff3000b83..e08f6a20a5b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -16,6 +16,7 @@ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/blkpg.h> +#include <linux/magic.h> #include <linux/buffer_head.h> #include <linux/swap.h> #include <linux/pagevec.h> @@ -506,7 +507,7 @@ static const struct super_operations bdev_sops = { static struct dentry *bd_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576); + return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); } static struct file_system_type bd_type = { diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 4d9d3a45e35..629e9ed99d0 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -427,6 +427,31 @@ out_unlock: return error; } +/* + * As described in commit 0ccf831cb lockdep: annotate epoll + * the use of wait queues used by epoll is done in a very controlled + * manner. Wake ups can nest inside each other, but are never done + * with the same locking. 
For example: + * + * dfd = socket(...); + * efd1 = epoll_create(); + * efd2 = epoll_create(); + * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...); + * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...); + * + * When a packet arrives to the device underneath "dfd", the net code will + * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a + * callback wakeup entry on that queue, and the wake_up() performed by the + * "dfd" net code will end up in ep_poll_callback(). At this point epoll + * (efd1) notices that it may have some event ready, so it needs to wake up + * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake() + * that ends up in another wake_up(), after having checked about the + * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to + * avoid stack blasting. + * + * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle + * this special case of epoll. + */ #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void ep_wake_up_nested(wait_queue_head_t *wqueue, unsigned long events, int subclass) @@ -699,9 +724,12 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, void *priv) { struct epitem *epi, *tmp; + poll_table pt; + init_poll_funcptr(&pt, NULL); list_for_each_entry_safe(epi, tmp, head, rdllink) { - if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & + pt._key = epi->event.events; + if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events) return POLLIN | POLLRDNORM; else { @@ -1049,13 +1077,11 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests) */ static int reverse_path_check(void) { - int length = 0; int error = 0; struct file *current_file; /* let's call this for all tfiles */ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { - length++; path_count_init(); error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, reverse_path_check_proc, current_file, @@ -1097,6 +1123,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); + epq.pt._key = event->events; /* * Attach the item to the poll hooks and get current event bits. @@ -1191,6 +1218,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even { int pwake = 0; unsigned int revents; + poll_table pt; + + init_poll_funcptr(&pt, NULL); /* * Set the new event interest mask before calling f_op->poll(); @@ -1198,13 +1228,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even * f_op->poll() call and the new event set registering. */ epi->event.events = event->events; + pt._key = event->events; epi->event.data = event->data; /* protected by mtx */ /* * Get current event bits. We can safely use the file* here because * its usage count has been increased by the caller of this function. */ - revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); + revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt); /* * If the item is "hot" and it is not registered inside the ready @@ -1239,6 +1270,9 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, unsigned int revents; struct epitem *epi; struct epoll_event __user *uevent; + poll_table pt; + + init_poll_funcptr(&pt, NULL); /* * We can loop without lock because we are passed a task private list. 
@@ -1251,7 +1285,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, list_del_init(&epi->rdllink); - revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & + pt._key = epi->event.events; + revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) & epi->event.events; /* diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index a81eb2367d3..98ae804f527 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -521,57 +521,46 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, op = &outname[*outlen * sizeof(wchar_t)]; } else { - if (nls) { - for (i = 0, ip = name, op = outname, *outlen = 0; - i < len && *outlen <= FAT_LFN_LEN; - *outlen += 1) - { - if (escape && (*ip == ':')) { - if (i > len - 5) - return -EINVAL; - ec = 0; - for (k = 1; k < 5; k++) { - nc = ip[k]; - ec <<= 4; - if (nc >= '0' && nc <= '9') { - ec |= nc - '0'; - continue; - } - if (nc >= 'a' && nc <= 'f') { - ec |= nc - ('a' - 10); - continue; - } - if (nc >= 'A' && nc <= 'F') { - ec |= nc - ('A' - 10); - continue; - } - return -EINVAL; + for (i = 0, ip = name, op = outname, *outlen = 0; + i < len && *outlen < FAT_LFN_LEN; + *outlen += 1) { + if (escape && (*ip == ':')) { + if (i > len - 5) + return -EINVAL; + ec = 0; + for (k = 1; k < 5; k++) { + nc = ip[k]; + ec <<= 4; + if (nc >= '0' && nc <= '9') { + ec |= nc - '0'; + continue; } - *op++ = ec & 0xFF; - *op++ = ec >> 8; - ip += 5; - i += 5; - } else { - if ((charlen = nls->char2uni(ip, len - i, (wchar_t *)op)) < 0) - return -EINVAL; - ip += charlen; - i += charlen; - op += 2; + if (nc >= 'a' && nc <= 'f') { + ec |= nc - ('a' - 10); + continue; + } + if (nc >= 'A' && nc <= 'F') { + ec |= nc - ('A' - 10); + continue; + } + return -EINVAL; } + *op++ = ec & 0xFF; + *op++ = ec >> 8; + ip += 5; + i += 5; + } else { + charlen = nls->char2uni(ip, len - i, + (wchar_t *)op); + if (charlen < 0) + return -EINVAL; + ip += charlen; + i += charlen; + op += 2; } - if (i < len) - return -ENAMETOOLONG; - } else { - for (i = 0, ip = name, op = outname, *outlen = 0; - i < len && *outlen <= FAT_LFN_LEN; - i++, *outlen += 1) - { - *op++ = *ip++; - *op++ = 0; - } - if (i < len) - return -ENAMETOOLONG; } + if (i < len) + return -ENAMETOOLONG; } *longlen = *outlen; diff --git a/fs/notify/notification.c b/fs/notify/notification.c index ee188158a22..c887b1378f7 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -447,7 +447,7 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, return event; } -__init int fsnotify_notification_init(void) +static __init int fsnotify_notification_init(void) { fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC); fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC); @@ -461,4 +461,3 @@ __init int fsnotify_notification_init(void) return 0; } subsys_initcall(fsnotify_notification_init); - diff --git a/fs/pipe.c b/fs/pipe.c index fe0502f9beb..25feaa3faac 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -13,6 +13,7 @@ #include <linux/fs.h> #include <linux/log2.h> #include <linux/mount.h> +#include <linux/magic.h> #include <linux/pipe_fs_i.h> #include <linux/uio.h> #include <linux/highmem.h> diff --git a/fs/proc/array.c b/fs/proc/array.c index c602b8d20f0..fbb53c24908 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -462,59 +462,56 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, /* convert nsec -> ticks */ start_time = nsec_to_clock_t(start_time); - seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \ -%lu %lu 
%lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld %lu %lu %lu\n", - pid_nr_ns(pid, ns), - tcomm, - state, - ppid, - pgid, - sid, - tty_nr, - tty_pgrp, - task->flags, - min_flt, - cmin_flt, - maj_flt, - cmaj_flt, - cputime_to_clock_t(utime), - cputime_to_clock_t(stime), - cputime_to_clock_t(cutime), - cputime_to_clock_t(cstime), - priority, - nice, - num_threads, - start_time, - vsize, - mm ? get_mm_rss(mm) : 0, - rsslim, - mm ? (permitted ? mm->start_code : 1) : 0, - mm ? (permitted ? mm->end_code : 1) : 0, - (permitted && mm) ? mm->start_stack : 0, - esp, - eip, - /* The signal information here is obsolete. - * It must be decimal for Linux 2.0 compatibility. - * Use /proc/#/status for real-time signals. - */ - task->pending.signal.sig[0] & 0x7fffffffUL, - task->blocked.sig[0] & 0x7fffffffUL, - sigign .sig[0] & 0x7fffffffUL, - sigcatch .sig[0] & 0x7fffffffUL, - wchan, - 0UL, - 0UL, - task->exit_signal, - task_cpu(task), - task->rt_priority, - task->policy, - (unsigned long long)delayacct_blkio_ticks(task), - cputime_to_clock_t(gtime), - cputime_to_clock_t(cgtime), - (mm && permitted) ? mm->start_data : 0, - (mm && permitted) ? mm->end_data : 0, - (mm && permitted) ? mm->start_brk : 0); + seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); + seq_put_decimal_ll(m, ' ', ppid); + seq_put_decimal_ll(m, ' ', pgid); + seq_put_decimal_ll(m, ' ', sid); + seq_put_decimal_ll(m, ' ', tty_nr); + seq_put_decimal_ll(m, ' ', tty_pgrp); + seq_put_decimal_ull(m, ' ', task->flags); + seq_put_decimal_ull(m, ' ', min_flt); + seq_put_decimal_ull(m, ' ', cmin_flt); + seq_put_decimal_ull(m, ' ', maj_flt); + seq_put_decimal_ull(m, ' ', cmaj_flt); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime)); + seq_put_decimal_ll(m, ' ', priority); + seq_put_decimal_ll(m, ' ', nice); + seq_put_decimal_ll(m, ' ', num_threads); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', start_time); + seq_put_decimal_ull(m, ' ', vsize); + seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0); + seq_put_decimal_ull(m, ' ', rsslim); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0); + seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0); + seq_put_decimal_ull(m, ' ', esp); + seq_put_decimal_ull(m, ' ', eip); + /* The signal information here is obsolete. + * It must be decimal for Linux 2.0 compatibility. + * Use /proc/#/status for real-time signals. + */ + seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', wchan); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ll(m, ' ', task->exit_signal); + seq_put_decimal_ll(m, ' ', task_cpu(task)); + seq_put_decimal_ull(m, ' ', task->rt_priority); + seq_put_decimal_ull(m, ' ', task->policy); + seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime)); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? 
mm->start_data : 0); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0); + seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0); + seq_putc(m, '\n'); if (mm) mmput(mm); return 0; @@ -542,8 +539,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); } - seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", - size, resident, shared, text, data); + /* + * For quick read, open code by putting numbers directly + * expected format is + * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", + * size, resident, shared, text, data); + */ + seq_put_decimal_ull(m, 0, size); + seq_put_decimal_ull(m, ' ', resident); + seq_put_decimal_ull(m, ' ', shared); + seq_put_decimal_ull(m, ' ', text); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', text); + seq_put_decimal_ull(m, ' ', 0); + seq_putc(m, '\n'); return 0; } diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e5e69aff6c6..86c67eee439 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -157,7 +157,8 @@ static int kcore_update_ram(void) #ifdef CONFIG_SPARSEMEM_VMEMMAP /* calculate vmemmap's address from given system ram pfn and register it */ -int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) +static int +get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) { unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT; unsigned long nr_pages = ent->size >> PAGE_SHIFT; @@ -189,7 +190,8 @@ int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) } #else -int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) +static int +get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) { return 1; } diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 27da860115c..3551f1f839e 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -53,7 +53,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, ei->ns_ops = ns_ops; ei->ns = ns; - dentry->d_op = &pid_dentry_operations; + d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, NULL)) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 121f77cfef7..6a0c62d6e44 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -89,18 +89,19 @@ static int show_stat(struct seq_file *p, void *v) } sum += arch_irq_stat(); - seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu " - "%llu\n", - (unsigned long long)cputime64_to_clock_t(user), - (unsigned long long)cputime64_to_clock_t(nice), - (unsigned long long)cputime64_to_clock_t(system), - (unsigned long long)cputime64_to_clock_t(idle), - (unsigned long long)cputime64_to_clock_t(iowait), - (unsigned long long)cputime64_to_clock_t(irq), - (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal), - (unsigned long long)cputime64_to_clock_t(guest), - (unsigned long long)cputime64_to_clock_t(guest_nice)); + seq_puts(p, "cpu "); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', 
cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; @@ -113,26 +114,24 @@ static int show_stat(struct seq_file *p, void *v) steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, - "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " - "%llu\n", - i, - (unsigned long long)cputime64_to_clock_t(user), - (unsigned long long)cputime64_to_clock_t(nice), - (unsigned long long)cputime64_to_clock_t(system), - (unsigned long long)cputime64_to_clock_t(idle), - (unsigned long long)cputime64_to_clock_t(iowait), - (unsigned long long)cputime64_to_clock_t(irq), - (unsigned long long)cputime64_to_clock_t(softirq), - (unsigned long long)cputime64_to_clock_t(steal), - (unsigned long long)cputime64_to_clock_t(guest), - (unsigned long long)cputime64_to_clock_t(guest_nice)); + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); + seq_putc(p, '\n'); } seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated? 
*/ for_each_irq_nr(j) - seq_printf(p, " %u", kstat_irqs(j)); + seq_put_decimal_ull(p, ' ', kstat_irqs(j)); seq_printf(p, "\nctxt %llu\n" @@ -149,7 +148,7 @@ static int show_stat(struct seq_file *p, void *v) seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); for (i = 0; i < NR_SOFTIRQS; i++) - seq_printf(p, " %u", per_softirq_sums[i]); + seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); seq_putc(p, '\n'); return 0; @@ -157,11 +156,14 @@ static int show_stat(struct seq_file *p, void *v) static int stat_open(struct inode *inode, struct file *file) { - unsigned size = 4096 * (1 + num_possible_cpus() / 32); + unsigned size = 1024 + 128 * num_possible_cpus(); char *buf; struct seq_file *m; int res; + /* minimum size to display an interrupt count : 2 bytes */ + size += 2 * nr_irqs; + /* don't ask for more than the kmalloc() max size */ if (size > KMALLOC_MAX_SIZE) size = KMALLOC_MAX_SIZE; @@ -173,7 +175,7 @@ static int stat_open(struct inode *inode, struct file *file) if (!res) { m = file->private_data; m->buf = buf; - m->size = size; + m->size = ksize(buf); } else kfree(buf); return res; diff --git a/fs/select.c b/fs/select.c index e782258d0de..ecfd0b125ba 100644 --- a/fs/select.c +++ b/fs/select.c @@ -223,7 +223,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, get_file(filp); entry->filp = filp; entry->wait_address = wait_address; - entry->key = p->key; + entry->key = p->_key; init_waitqueue_func_entry(&entry->wait, pollwake); entry->wait.private = pwq; add_wait_queue(wait_address, &entry->wait); @@ -386,13 +386,11 @@ get_max: static inline void wait_key_set(poll_table *wait, unsigned long in, unsigned long out, unsigned long bit) { - if (wait) { - wait->key = POLLEX_SET; - if (in & bit) - wait->key |= POLLIN_SET; - if (out & bit) - wait->key |= POLLOUT_SET; - } + wait->_key = POLLEX_SET; + if (in & bit) + wait->_key |= POLLIN_SET; + if (out & bit) + wait->_key |= POLLOUT_SET; } int do_select(int n, fd_set_bits *fds, struct timespec *end_time) @@ -414,7 +412,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_initwait(&table); wait = &table.pt; if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { - wait = NULL; + wait->_qproc = NULL; timed_out = 1; } @@ -459,17 +457,17 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) if ((mask & POLLIN_SET) && (in & bit)) { res_in |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } if ((mask & POLLOUT_SET) && (out & bit)) { res_out |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } if ((mask & POLLEX_SET) && (ex & bit)) { res_ex |= bit; retval++; - wait = NULL; + wait->_qproc = NULL; } } } @@ -481,7 +479,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) *rexp = res_ex; cond_resched(); } - wait = NULL; + wait->_qproc = NULL; if (retval || timed_out || signal_pending(current)) break; if (table.error) { @@ -720,7 +718,7 @@ struct poll_list { * interested in events matching the pollfd->events mask, and the result * matching that mask is both recorded in pollfd->revents and returned. The * pwait poll_table will be used by the fd-provided poll handler for waiting, - * if non-NULL. + * if pwait->_qproc is non-NULL. 
*/ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) { @@ -738,9 +736,7 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) if (file != NULL) { mask = DEFAULT_POLLMASK; if (file->f_op && file->f_op->poll) { - if (pwait) - pwait->key = pollfd->events | - POLLERR | POLLHUP; + pwait->_key = pollfd->events|POLLERR|POLLHUP; mask = file->f_op->poll(file, pwait); } /* Mask out unneeded events. */ @@ -763,7 +759,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { - pt = NULL; + pt->_qproc = NULL; timed_out = 1; } @@ -781,22 +777,22 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (; pfd != pfd_end; pfd++) { /* * Fish for events. If we found one, record it - * and kill the poll_table, so we don't + * and kill poll_table->_qproc, so we don't * needlessly register any other waiters after * this. They'll get immediately deregistered * when we break out and return. */ if (do_pollfd(pfd, pt)) { count++; - pt = NULL; + pt->_qproc = NULL; } } } /* * All waiters have already been registered, so don't provide - * a poll_table to them on the next loop iteration. + * a poll_table->_qproc to them on the next loop iteration. */ - pt = NULL; + pt->_qproc = NULL; if (!count) { count = wait->error; if (signal_pending(current)) diff --git a/fs/seq_file.c b/fs/seq_file.c index aa242dc9937..46cfb067fc3 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -13,6 +13,22 @@ #include <asm/uaccess.h> #include <asm/page.h> + +/* + * seq_files have a buffer which can may overflow. When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + */ +static bool seq_overflow(struct seq_file *m) +{ + return m->count == m->size; +} + +static void seq_set_overflow(struct seq_file *m) +{ + m->count = m->size; +} + /** * seq_open - initialize sequential file * @file: file we initialize @@ -92,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset) error = 0; m->count = 0; } - if (m->count == m->size) + if (seq_overflow(m)) goto Eoverflow; if (pos + m->count > offset) { m->from = offset - pos; @@ -234,7 +250,7 @@ Fill: break; } err = m->op->show(m, p); - if (m->count == m->size || err) { + if (seq_overflow(m) || err) { m->count = offs; if (likely(err <= 0)) break; @@ -361,7 +377,7 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc) *p++ = '0' + (c & 07); continue; } - m->count = m->size; + seq_set_overflow(m); return -1; } m->count = p - m->buf; @@ -383,7 +399,7 @@ int seq_printf(struct seq_file *m, const char *f, ...) return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_printf); @@ -512,7 +528,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits, return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_bitmap); @@ -528,7 +544,7 @@ int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, return 0; } } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_bitmap_list); @@ -639,11 +655,63 @@ int seq_puts(struct seq_file *m, const char *s) m->count += len; return 0; } - m->count = m->size; + seq_set_overflow(m); return -1; } EXPORT_SYMBOL(seq_puts); +/* + * A helper routine for putting decimal numbers without rich format of printf(). + * only 'unsigned long long' is supported. 
+ * This routine will put one byte delimiter + number into seq_file. + * This routine is very quick when you show lots of numbers. + * In usual cases, it will be better to use seq_printf(). It's easier to read. + */ +int seq_put_decimal_ull(struct seq_file *m, char delimiter, + unsigned long long num) +{ + int len; + + if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */ + goto overflow; + + if (delimiter) + m->buf[m->count++] = delimiter; + + if (num < 10) { + m->buf[m->count++] = num + '0'; + return 0; + } + + len = num_to_str(m->buf + m->count, m->size - m->count, num); + if (!len) + goto overflow; + m->count += len; + return 0; +overflow: + seq_set_overflow(m); + return -1; +} +EXPORT_SYMBOL(seq_put_decimal_ull); + +int seq_put_decimal_ll(struct seq_file *m, char delimiter, + long long num) +{ + if (num < 0) { + if (m->count + 3 >= m->size) { + seq_set_overflow(m); + return -1; + } + if (delimiter) + m->buf[m->count++] = delimiter; + num = -num; + delimiter = '-'; + } + return seq_put_decimal_ull(m, delimiter, num); + +} +EXPORT_SYMBOL(seq_put_decimal_ll); + /** * seq_write - write arbitrary data to buffer * @seq: seq_file identifying the buffer to which data should be written @@ -659,7 +727,7 @@ int seq_write(struct seq_file *seq, const void *data, size_t len) seq->count += len; return 0; } - seq->count = seq->size; + seq_set_overflow(seq); return -1; } EXPORT_SYMBOL(seq_write); |
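The bulk of this series replaces printf-style formatting of /proc counters with the new seq_put_decimal_ull()/seq_put_decimal_ll() helpers, which emit a one-byte delimiter plus the decimal digits directly into the seq_file buffer instead of going through the format parser. As a rough illustration of the idea, here is a minimal user-space sketch under stated assumptions: buf_put_decimal(), struct outbuf, BUF_SIZE and the sample values are invented for the example and are not the kernel API.

```c
/*
 * User-space sketch of the technique behind seq_put_decimal_ull():
 * append an optional delimiter and the decimal digits of a number
 * straight into a buffer, skipping printf format parsing entirely.
 */
#include <stdio.h>

#define BUF_SIZE 4096

struct outbuf {
	char   data[BUF_SIZE];
	size_t count;		/* bytes used so far */
};

/* Write "delimiter + digits of num" into the buffer; -1 on overflow. */
static int buf_put_decimal(struct outbuf *b, char delimiter, unsigned long long num)
{
	char tmp[20];		/* 2^64-1 has at most 20 decimal digits */
	int len = 0;

	/* Produce digits least-significant first, then copy them reversed. */
	do {
		tmp[len++] = '0' + (num % 10);
		num /= 10;
	} while (num);

	if (b->count + (delimiter ? 1 : 0) + len > sizeof(b->data))
		return -1;

	if (delimiter)
		b->data[b->count++] = delimiter;
	while (len)
		b->data[b->count++] = tmp[--len];
	return 0;
}

int main(void)
{
	struct outbuf b = { .count = 0 };

	/* Seven space-separated counters, the first with no leading
	 * delimiter -- the same shape as the statm line rewritten above.
	 * Values are arbitrary. */
	unsigned long long fields[] = { 1130, 187, 145, 11, 0, 84, 0 };

	for (size_t i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
		buf_put_decimal(&b, i ? ' ' : 0, fields[i]);
	if (b.count < sizeof(b.data))
		b.data[b.count++] = '\n';

	fwrite(b.data, 1, b.count, stdout);	/* prints: 1130 187 145 11 0 84 0 */
	return 0;
}
```

The kernel helper in this diff additionally reserves two bytes up front, falls back to num_to_str() for multi-digit values, and marks the seq_file as overflowed (m->count = m->size) when it runs out of room, so that traverse() can reallocate a larger buffer and the show routine is retried.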