diff -Nru linux-2.4/arch/i386/kernel/entry.S linux-2.4.epoll/arch/i386/kernel/entry.S
--- linux-2.4/arch/i386/kernel/entry.S	2003-12-29 11:25:47.520736360 -0800
+++ linux-2.4.epoll/arch/i386/kernel/entry.S	2003-12-29 11:28:20.770438864 -0800
@@ -658,9 +658,9 @@
 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_free_hugepages */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_exit_group */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_lookup_dcookie */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_create */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_ctl 255 */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_epoll_wait */
+	.long SYMBOL_NAME(sys_epoll_create)	/* sys_epoll_create */
+	.long SYMBOL_NAME(sys_epoll_ctl)	/* sys_epoll_ctl 255 */
+	.long SYMBOL_NAME(sys_epoll_wait)	/* sys_epoll_wait */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_remap_file_pages */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* sys_set_tid_address */
diff -Nru linux-2.4/arch/ia64/ia32/ia32_entry.S linux-2.4.epoll/arch/ia64/ia32/ia32_entry.S
--- linux-2.4/arch/ia64/ia32/ia32_entry.S	2003-12-29 11:25:47.744702312 -0800
+++ linux-2.4.epoll/arch/ia64/ia32/ia32_entry.S	2003-12-29 11:28:20.771438712 -0800
@@ -421,10 +421,37 @@
 	data8 sys_ni_syscall	/* reserved for Security */
 	data8 sys_gettid
 	data8 sys_readahead	/* 225 */
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall	/* 230 */
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall	/* 235 */
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall	/* 240 */
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall	/* 245 */
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall	/* 250 */
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys32_ni_syscall
+	data8 sys_epoll_create
+	data8 sys32_epoll_ctl	/* 255 */
+	data8 sys32_epoll_wait
 	/*
 	 *  CAUTION: If any system calls are added beyond this point
 	 *	then the check in `arch/ia64/kernel/ivt.S' will have
diff -Nru linux-2.4/arch/ia64/ia32/sys_ia32.c linux-2.4.epoll/arch/ia64/ia32/sys_ia32.c
--- linux-2.4/arch/ia64/ia32/sys_ia32.c	2003-12-29 11:25:47.756700488 -0800
+++ linux-2.4.epoll/arch/ia64/ia32/sys_ia32.c	2003-12-29 11:28:20.857425640 -0800
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -4044,6 +4045,82 @@
 	goto out;
 }
 
+/* Structure for ia32 emulation on ia64 */
+struct epoll_event32
+{
+	u32 events;
+	u64 data;
+} __attribute__((packed));
+
+asmlinkage long
+sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
+{
+	mm_segment_t old_fs = get_fs();
+	struct epoll_event event64;
+	int error = -EFAULT;
+	u32 data_halfword;
+
+	if ((error = verify_area(VERIFY_READ, event,
+				 sizeof(struct epoll_event32))))
+		return error;
+
+	__get_user(event64.events, &event->events);
+	__get_user(data_halfword, (u32*)(&event->data));
+	event64.data = data_halfword;
+	__get_user(data_halfword, ((u32*)(&event->data) + 1));
+	event64.data |= ((u64)data_halfword) << 32;
+
+	set_fs(KERNEL_DS);
+	error = sys_epoll_ctl(epfd, op, fd, &event64);
+	set_fs(old_fs);
+
+	return error;
+}
+
+asmlinkage long
+sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
+		 int timeout)
+{
+	struct epoll_event *events64 = NULL;
+	mm_segment_t old_fs = get_fs();
+	int i, error, nevents;
+
+	if (maxevents <= 0)
+		return -EINVAL;
+
+	/* Verify that the area passed by the user is writeable */
+	if ((error = verify_area(VERIFY_WRITE, events,
+				 maxevents * sizeof(struct epoll_event32))))
+		return error;
+
+	/* Allocate the space needed for the intermediate copy */
+	events64 = kmalloc(maxevents * sizeof(struct epoll_event), GFP_KERNEL);
+	if (events64 == NULL)
+		return -ENOMEM;
+
+	/* Do the system call */
+	set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem */
+	error = nevents = sys_epoll_wait(epfd, events64, maxevents, timeout);
+	set_fs(old_fs);
+
+	/* Don't modify userspace memory if we're returning an error */
+	if (error > 0) {
+		/* Translate the 64-bit structures back into the 32-bit
+		   structures */
+		for (i = 0; i < nevents; i++) {
+			__put_user(events64[i].events,
+				   &events[i].events);
+			__put_user((u32)(events64[i].data),
+				   (u32*)(&events[i].data));
+			__put_user((u32)(events64[i].data >> 32),
+				   ((u32*)(&events[i].data) + 1));
+		}
+	}
+
+	kfree(events64);
+	return error;
+}
+
 #ifdef NOTYET	/* UNTESTED FOR IA64 FROM HERE DOWN */
 
 struct ncp_mount_data32 {
diff -Nru linux-2.4/arch/ia64/kernel/entry.S linux-2.4.epoll/arch/ia64/kernel/entry.S
--- linux-2.4/arch/ia64/kernel/entry.S	2003-12-29 11:25:47.804693192 -0800
+++ linux-2.4.epoll/arch/ia64/kernel/entry.S	2003-12-29 11:28:20.859425336 -0800
@@ -1407,9 +1407,9 @@
 	data8 ia64_ni_syscall				// 1240
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall
-	data8 ia64_ni_syscall
-	data8 ia64_ni_syscall
-	data8 ia64_ni_syscall				// 1245
+	data8 sys_epoll_create
+	data8 sys_epoll_ctl
+	data8 sys_epoll_wait				// 1245
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall
diff -Nru linux-2.4/arch/ia64/kernel/ivt.S linux-2.4.epoll/arch/ia64/kernel/ivt.S
--- linux-2.4/arch/ia64/kernel/ivt.S	2003-12-29 11:25:47.809692432 -0800
+++ linux-2.4.epoll/arch/ia64/kernel/ivt.S	2003-12-29 11:28:20.901418952 -0800
@@ -1488,7 +1488,7 @@
 	alloc r15=ar.pfs,0,0,6,0	// must first in an insn group
 	;;
 	ld4 r8=[r14],8		// r8 == eax (syscall number)
-	mov r15=230		// number of entries in ia32 system call table
+	mov r15=257		// number of entries in ia32 system call table
 	;;
 	cmp.ltu.unc p6,p7=r8,r15
 	ld4 out1=[r14],8	// r9 == ecx
diff -Nru linux-2.4/arch/sparc64/solaris/timod.c linux-2.4.epoll/arch/sparc64/solaris/timod.c
--- linux-2.4/arch/sparc64/solaris/timod.c	2003-12-29 11:25:59.787871472 -0800
+++ linux-2.4.epoll/arch/sparc64/solaris/timod.c	2003-12-29 11:28:20.902418800 -0800
@@ -651,10 +651,11 @@
 		SOLD("LISTEN done");
 	}
 	if (!(filp->f_flags & O_NONBLOCK)) {
-		poll_table wait_table, *wait;
+		struct poll_wqueues wait_table;
+		poll_table *wait;
 
 		poll_initwait(&wait_table);
-		wait = &wait_table;
+		wait = &wait_table.pt;
 		for(;;) {
 			SOLD("loop");
 			set_current_state(TASK_INTERRUPTIBLE);
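[ Editor's note, not part of the patch: the compat wrappers above exist because
  ia32 aligns a u64 to 4 bytes while ia64 aligns it to 8, so struct epoll_event32
  must be packed, and its 64-bit data field must be moved as two aligned 32-bit
  halves. The stand-alone userland sketch below (little-endian assumed, as on
  ia32/ia64; names epoll_event32/epoll_event64 are illustrative) shows both the
  size difference and the same half-word reassembly sys32_epoll_ctl() performs. ]

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* ia32 layout: data at offset 4, sizeof == 12; the packed
	   attribute reproduces this layout on a 64-bit compiler */
	struct epoll_event32 {
		uint32_t events;
		uint64_t data;
	} __attribute__((packed));

	/* natural 64-bit layout: data at offset 8, sizeof == 16 */
	struct epoll_event64 {
		uint32_t events;
		uint64_t data;
	};

	int main(void)
	{
		struct epoll_event32 e32 = { 1, 0x1122334455667788ULL };
		uint32_t half[2];
		uint64_t data;

		/* same reassembly as sys32_epoll_ctl(): two aligned 32-bit
		   loads instead of one misaligned 64-bit load */
		memcpy(half, &e32.data, sizeof(half));
		data = ((uint64_t)half[1] << 32) | half[0];

		printf("sizeof e32=%zu e64=%zu data=%#llx\n", sizeof(e32),
		       sizeof(struct epoll_event64), (unsigned long long)data);
		return 0;
	}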
diff -Nru linux-2.4/fs/eventpoll.c linux-2.4.epoll/fs/eventpoll.c
--- linux-2.4/fs/eventpoll.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.epoll/fs/eventpoll.c	2003-12-29 11:28:28.269298864 -0800
@@ -0,0 +1,1773 @@
+/*
+ *  fs/eventpoll.c ( Efficient event polling implementation )
+ *  Copyright (C) 2001,...,2003	 Davide Libenzi
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  Davide Libenzi <davidel@xmailserver.org>
+ *
+ *  11 December 2002
+ *  Ported from 2.5.51 - janetinc@us.ibm.com
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/*
+ * LOCKING:
+ * There are three levels of locking required by epoll :
+ *
+ * 1) epsem (semaphore)
+ * 2) ep->sem (rw_semaphore)
+ * 3) ep->lock (rw_lock)
+ *
+ * The acquire order is the one listed above, from 1 to 3.
+ * We need a spinlock (ep->lock) because we manipulate objects
+ * from inside the poll callback, that might be triggered from
+ * a wake_up() that in turn might be called from IRQ context.
+ * So we can't sleep inside the poll callback and hence we need
+ * a spinlock. During the event transfer loop (from kernel to
+ * user space) we could end up sleeping due to a copy_to_user(), so
+ * we need a lock that will allow us to sleep. This lock is a
+ * read-write semaphore (ep->sem). It is acquired on read during
+ * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL)
+ * and during eventpoll_release(). Then we also need a global
+ * semaphore to serialize eventpoll_release() and ep_free().
+ * This semaphore is acquired by ep_free() during the epoll file
+ * cleanup path and it is also acquired by eventpoll_release()
+ * if a file has been pushed inside an epoll set and it is then
+ * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
+ * It is possible to drop the "ep->sem" and to use the global
+ * semaphore "epsem" (together with "ep->lock") to have it working,
+ * but having "ep->sem" will make the interface more scalable.
+ * Events that require holding "epsem" are very rare, while for
+ * normal operations the epoll private "ep->sem" will guarantee
+ * a greater scalability.
+ */
+
+
+#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */
+
+#define DEBUG_EPOLL 0
+
+#if DEBUG_EPOLL > 0
+#define DPRINTK(x) printk x
+#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
+#else /* #if DEBUG_EPOLL > 0 */
+#define DPRINTK(x) (void) 0
+#define DNPRINTK(n, x) (void) 0
+#endif /* #if DEBUG_EPOLL > 0 */
+
+#define DEBUG_EPI 0
+
+#if DEBUG_EPI != 0
+#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
+#else /* #if DEBUG_EPI != 0 */
+#define EPI_SLAB_DEBUG 0
+#endif /* #if DEBUG_EPI != 0 */
+
+/* Epoll private bits inside the event mask */
+#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)
+
+/* Maximum number of poll wake up nests we are allowing */
+#define EP_MAX_POLLWAKE_NESTS 4
+
+/* Maximum size of the hash in bits ( 2^N ) */
+#define EP_MAX_HASH_BITS 17
+
+/* Minimum size of the hash in bits ( 2^N ) */
+#define EP_MIN_HASH_BITS 9
+
+/* Number of hash entries ( "struct list_head" ) inside a page */
+#define EP_HENTRY_X_PAGE (PAGE_SIZE / sizeof(struct list_head))
+
+/* Maximum size of the hash in pages */
+#define EP_MAX_HPAGES ((1 << EP_MAX_HASH_BITS) / EP_HENTRY_X_PAGE + 1)
+
+/* Number of pages allocated for an "hbits" sized hash table */
+#define EP_HASH_PAGES(hbits) ((int) ((1 << (hbits)) / EP_HENTRY_X_PAGE + \
+				     ((1 << (hbits)) % EP_HENTRY_X_PAGE ? 1: 0)))
+
+/* Macro to allocate a "struct epitem" from the slab cache */
+#define EPI_MEM_ALLOC()	(struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL)
+
+/* Macro to free a "struct epitem" to the slab cache */
+#define EPI_MEM_FREE(p)	kmem_cache_free(epi_cache, p)
+
+/* Macro to allocate a "struct eppoll_entry" from the slab cache */
+#define PWQ_MEM_ALLOC()	(struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL)
+
+/* Macro to free a "struct eppoll_entry" to the slab cache */
+#define PWQ_MEM_FREE(p)	kmem_cache_free(pwq_cache, p)
+
+/* Fast test to see if the file is an eventpoll file */
+#define IS_FILE_EPOLL(f) ((f)->f_op == &eventpoll_fops)
+
+/*
+ * Remove the item from the list and perform its initialization.
+ * This is useful for us because we can test if the item is linked
+ * using "EP_IS_LINKED(p)".
+ */
+#define EP_LIST_DEL(p) do { list_del(p); INIT_LIST_HEAD(p); } while (0)
+
+/* Tells us if the item is currently linked */
+#define EP_IS_LINKED(p) (!list_empty(p))
+
+/* Get the "struct epitem" from a wait queue pointer */
+#define EP_ITEM_FROM_WAIT(p) ((struct epitem *) container_of(p, struct eppoll_entry, wait)->base)
+
+/* Get the "struct epitem" from an epoll queue wrapper */
+#define EP_ITEM_FROM_EPQUEUE(p) (container_of(p, struct ep_pqueue, pt)->epi)
+
+/*
+ * This is used to optimize the event transfer to userspace. Since this
+ * is kept on stack, it should be pretty small.
+ */
+#define EP_MAX_BUF_EVENTS 32
+
+
+
+/*
+ * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
+ * It is used to keep track of all tasks that are currently inside the wake_up() code
+ * to 1) short-circuit the one coming from the same task and same wait queue head
+ * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
+ * 3) let go the ones coming from other tasks.
+ */
+struct wake_task_node {
+	struct list_head llink;
+	task_t *task;
+	wait_queue_head_t *wq;
+};
+
+/*
+ * This is used to implement the safe poll wake up avoiding to reenter
+ * the poll callback from inside wake_up().
+ */
+struct poll_safewake {
+	struct list_head wake_task_list;
+	spinlock_t lock;
+};
+
+/*
+ * This structure is stored inside the "private_data" member of the file
+ * structure and represents the main data structure for the eventpoll
+ * interface.
+ */
+struct eventpoll {
+	/* Protect access to this structure */
+	rwlock_t lock;
+
+	/*
+	 * This semaphore is used to ensure that files are not removed
+	 * while epoll is using them. This is read-held during the event
+	 * collection loop and it is write-held during the file cleanup
+	 * path, the epoll file exit code and the ctl operations.
+	 */
+	struct rw_semaphore sem;
+
+	/* Wait queue used by sys_epoll_wait() */
+	wait_queue_head_t wq;
+
+	/* Wait queue used by file->poll() */
+	wait_queue_head_t poll_wait;
+
+	/* List of ready file descriptors */
+	struct list_head rdllist;
+
+	/* Size of the hash */
+	unsigned int hashbits;
+
+	/* Pages for the "struct epitem" hash */
+	char *hpages[EP_MAX_HPAGES];
+};
+
+/* Wait structure used by the poll hooks */
+struct eppoll_entry {
+	/* List header used to link this structure to the "struct epitem" */
+	struct list_head llink;
+
+	/* The "base" pointer is set to the container "struct epitem" */
+	void *base;
+
+	/*
+	 * Wait queue item that will be linked to the target file wait
+	 * queue head.
+	 */
+	wait_queue_t wait;
+
+	/* The wait queue head that linked the "wait" wait queue item */
+	wait_queue_head_t *whead;
+};
+
+/*
+ * Each file descriptor added to the eventpoll interface will
+ * have an entry of this type linked to the hash.
+ */
+struct epitem {
+	/* List header used to link this structure to the eventpoll hash */
+	struct list_head llink;
+
+	/* List header used to link this structure to the eventpoll ready list */
+	struct list_head rdllink;
+
+	/* Number of active wait queues attached to poll operations */
+	int nwait;
+
+	/* List containing poll wait queues */
+	struct list_head pwqlist;
+
+	/* The "container" of this item */
+	struct eventpoll *ep;
+
+	/* The file descriptor this item refers to */
+	int fd;
+
+	/* The file this item refers to */
+	struct file *file;
+
+	/* The structure that describes the interested events and the source fd */
+	struct epoll_event event;
+
+	/*
+	 * Used to keep track of the usage count of the structure. This prevents
+	 * the structure from disappearing from underneath our processing.
+	 */
+	atomic_t usecnt;
+
+	/* List header used to link this item to the "struct file" items list */
+	struct list_head fllink;
+
+	/* List header used to link the item to the transfer list */
+	struct list_head txlink;
+
+	/*
+	 * This is used during the collection/transfer of events to userspace
+	 * to remember the event set seen at collection time.
+	 */
+	unsigned int revents;
+};
+
+/* Wrapper struct used by poll queueing */
+struct ep_pqueue {
+	poll_table pt;
+	struct epitem *epi;
+};
+
+
+
+static void ep_poll_safewake_init(struct poll_safewake *psw);
+static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
+static unsigned int ep_get_hash_bits(unsigned int hintsize);
+static int ep_getfd(int *efd, struct inode **einode, struct file **efile);
+static int ep_alloc_pages(char **pages, int numpages);
+static int ep_free_pages(char **pages, int numpages);
+static int ep_file_init(struct file *file, unsigned int hashbits);
+static unsigned int ep_hash_index(struct eventpoll *ep, struct file *file, int fd);
+static struct list_head *ep_hash_entry(struct eventpoll *ep, unsigned int index);
+static int ep_init(struct eventpoll *ep, unsigned int hashbits);
+static void ep_free(struct eventpoll *ep);
+static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
+static void ep_use_epitem(struct epitem *epi);
+static void ep_release_epitem(struct epitem *epi);
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt);
+static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+		     struct file *tfile, int fd);
+static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event);
+static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
+static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
+static int ep_remove(struct eventpoll *ep, struct epitem *epi);
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync);
+static int ep_eventpoll_close(struct inode *inode, struct file *file);
+static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
+static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents);
+static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
+			  struct epoll_event *events);
+static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist);
+static int ep_events_transfer(struct eventpoll *ep, struct epoll_event *events, int maxevents);
+static int ep_poll(struct eventpoll *ep, struct epoll_event *events, int maxevents,
+		   long timeout);
+static int eventpollfs_statfs(struct super_block *sb, struct statfs *buf);
+static struct super_block *eventpollfs_read_super(struct super_block *sb, void *data, int silent);
+static int eventpollfs_delete_dentry(struct dentry *dentry);
+static struct inode *ep_eventpoll_inode(void);
+
+/*
+ * This semaphore is used to serialize ep_free() and eventpoll_release().
+ */
+struct semaphore epsem;
+
+/* Safe wake up implementation */
+static struct poll_safewake psw;
+
+/* Slab cache used to allocate "struct epitem" */
+static kmem_cache_t *epi_cache;
+
+/* Slab cache used to allocate "struct eppoll_entry" */
+static kmem_cache_t *pwq_cache;
+
+/* Virtual fs used to allocate inodes for eventpoll files */
+static struct vfsmount *eventpoll_mnt;
+
+/* File callbacks that implement the eventpoll file behaviour */
+static struct file_operations eventpoll_fops = {
+	.release	= ep_eventpoll_close,
+	.poll		= ep_eventpoll_poll
+};
+
+/* Virtual fs operations */
+static struct super_operations eventpollfs_ops = {
+	.statfs		= eventpollfs_statfs,
+};
+
+/* Virtual fs structure declaration */
+static DECLARE_FSTYPE(eventpoll_fs_type, "eventpollfs", eventpollfs_read_super, FS_NOMOUNT);
+
+/* Very basic directory entry operations for the eventpoll virtual file system */
+static struct dentry_operations eventpollfs_dentry_operations = {
+	.d_delete	= eventpollfs_delete_dentry,
+};
+
+
+
+
+/* Initialize the poll safe wake up structure */
+static void ep_poll_safewake_init(struct poll_safewake *psw)
+{
+
+	INIT_LIST_HEAD(&psw->wake_task_list);
+	spin_lock_init(&psw->lock);
+}
+
+
+/*
+ * Perform a safe wake up of the poll wait list. The problem is that
+ * with the new callback'd wake up system, it is possible that the
+ * poll callback is reentered from inside the call to wake_up() done
+ * on the poll wait queue head. The rule is that we cannot reenter the
+ * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
+ * and we cannot reenter the same wait queue head at all. This enables
+ * a hierarchy of epoll file descriptors no deeper than
+ * EP_MAX_POLLWAKE_NESTS. We need the irq version of the spin lock
+ * because this one gets called by the poll callback, that in turn is called
+ * from inside a wake_up(), that might be called from irq context.
+ */
+static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
+{
+	int wake_nests = 0;
+	unsigned long flags;
+	task_t *this_task = current;
+	struct list_head *lsthead = &psw->wake_task_list, *lnk;
+	struct wake_task_node *tncur;
+	struct wake_task_node tnode;
+
+	spin_lock_irqsave(&psw->lock, flags);
+
+	/* Try to see if the current task is already inside this wakeup call */
+	list_for_each(lnk, lsthead) {
+		tncur = list_entry(lnk, struct wake_task_node, llink);
+
+		if (tncur->wq == wq ||
+		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
+			/*
+			 * Oops ... loop detected or maximum nest level reached.
+			 * We abort this wake by breaking the cycle itself.
+			 */
+			spin_unlock_irqrestore(&psw->lock, flags);
+			return;
+		}
+	}
+
+	/* Add the current task to the list */
+	tnode.task = this_task;
+	tnode.wq = wq;
+	list_add(&tnode.llink, lsthead);
+
+	spin_unlock_irqrestore(&psw->lock, flags);
+
+	/* Do really wake up now */
+	wake_up(wq);
+
+	/* Remove the current task from the list */
+	spin_lock_irqsave(&psw->lock, flags);
+	list_del(&tnode.llink);
+	spin_unlock_irqrestore(&psw->lock, flags);
+}
+
+
+/*
+ * Calculate the size of the hash in bits. The returned size will be
+ * bounded between EP_MIN_HASH_BITS and EP_MAX_HASH_BITS.
+ */
+static unsigned int ep_get_hash_bits(unsigned int hintsize)
+{
+	unsigned int i, val;
+
+	for (i = 0, val = 1; val < hintsize && i < EP_MAX_HASH_BITS; i++, val <<= 1);
+	return i < EP_MIN_HASH_BITS ? EP_MIN_HASH_BITS: i;
+}
+
+
+/* Used to initialize the epoll bits inside the "struct file" */
+void eventpoll_init_file(struct file *file)
+{
+
+	INIT_LIST_HEAD(&file->f_ep_links);
+	spin_lock_init(&file->f_ep_lock);
+}
+
+
+/*
+ * This is called from eventpoll_release() to unlink files from the eventpoll
+ * interface. We need this facility to correctly clean up files that are
+ * closed without being removed from the eventpoll interface.
+ */
+void eventpoll_release_file(struct file *file)
+{
+	struct list_head *lsthead = &file->f_ep_links;
+	struct eventpoll *ep;
+	struct epitem *epi;
+
+	/*
+	 * We don't want to get "file->f_ep_lock" because it is not
+	 * necessary. It is not necessary because we're in the "struct file"
+	 * cleanup path, and this means that no one is using this file anymore.
+	 * The only hit might come from ep_free() but holding the semaphore
+	 * will correctly serialize the operation. We do need to acquire
+	 * "ep->sem" after "epsem" because ep_remove() requires it when called
+	 * from anywhere but ep_free().
+	 */
+	down(&epsem);
+
+	while (!list_empty(lsthead)) {
+		epi = list_entry(lsthead->next, struct epitem, fllink);
+
+		ep = epi->ep;
+		EP_LIST_DEL(&epi->fllink);
+		down_write(&ep->sem);
+		ep_remove(ep, epi);
+		up_write(&ep->sem);
+	}
+
+	up(&epsem);
+}
+
+
+/*
+ * It opens an eventpoll file descriptor by suggesting a storage of "size"
+ * file descriptors. The size parameter is just a hint about how to size
+ * data structures. It won't prevent the user from storing more than "size"
+ * file descriptors inside the epoll interface. It is the kernel part of
+ * the userspace epoll_create(2).
+ */
+asmlinkage long sys_epoll_create(int size)
+{
+	int error, fd;
+	unsigned int hashbits;
+	struct inode *inode;
+	struct file *file;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
+		     current, size));
+
+	/* Correctly size the hash */
+	hashbits = ep_get_hash_bits((unsigned int) size);
+
+	/*
+	 * Creates all the items needed to setup an eventpoll file. That is,
+	 * a file structure, an inode and a free file descriptor.
+	 */
+	error = ep_getfd(&fd, &inode, &file);
+	if (error)
+		goto eexit_1;
+
+	/* Setup the file internal data structure ( "struct eventpoll" ) */
+	error = ep_file_init(file, hashbits);
+	if (error)
+		goto eexit_2;
+
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
+		     current, size, fd));
+
+	return fd;
+
+eexit_2:
+	sys_close(fd);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
+		     current, size, error));
+	return error;
+}
+
+
+/*
+ * The following function implements the controller interface for the eventpoll
+ * file that enables the insertion/removal/change of file descriptors inside
+ * the interest set. It represents the kernel part of the user space epoll_ctl(2).
+ */
+asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
+{
+	int error;
+	struct file *file, *tfile;
+	struct eventpoll *ep;
+	struct epitem *epi;
+	struct epoll_event epds;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
+		     current, epfd, op, fd, event));
+
+	error = -EFAULT;
+	if (copy_from_user(&epds, event, sizeof(struct epoll_event)))
+		goto eexit_1;
+
+	/* Get the "struct file *" for the eventpoll file */
+	error = -EBADF;
+	file = fget(epfd);
+	if (!file)
+		goto eexit_1;
+
+	/* Get the "struct file *" for the target file */
+	tfile = fget(fd);
+	if (!tfile)
+		goto eexit_2;
+
+	/* The target file descriptor must support poll */
+	error = -EPERM;
+	if (!tfile->f_op || !tfile->f_op->poll)
+		goto eexit_3;
+
+	/*
+	 * We have to check that the file structure underneath the file descriptor
+	 * the user passed to us _is_ an eventpoll file. And also we do not permit
+	 * adding an epoll file descriptor inside itself.
+	 */
+	error = -EINVAL;
+	if (file == tfile || !IS_FILE_EPOLL(file))
+		goto eexit_3;
+
+	/*
+	 * At this point it is safe to assume that the "private_data" contains
+	 * our own data structure.
+	 */
+	ep = file->private_data;
+
+	down_write(&ep->sem);
+
+	/* Try to lookup the file inside our hash table */
+	epi = ep_find(ep, tfile, fd);
+
+	error = -EINVAL;
+	switch (op) {
+	case EPOLL_CTL_ADD:
+		if (!epi) {
+			epds.events |= POLLERR | POLLHUP;
+
+			error = ep_insert(ep, &epds, tfile, fd);
+		} else
+			error = -EEXIST;
+		break;
+	case EPOLL_CTL_DEL:
+		if (epi)
+			error = ep_remove(ep, epi);
+		else
+			error = -ENOENT;
+		break;
+	case EPOLL_CTL_MOD:
+		if (epi) {
+			epds.events |= POLLERR | POLLHUP;
+			error = ep_modify(ep, epi, &epds);
+		} else
+			error = -ENOENT;
+		break;
+	}
+
+	/*
+	 * The function ep_find() increments the usage count of the structure
+	 * so, if it is not NULL, we need to release it.
+	 */
+	if (epi)
+		ep_release_epitem(epi);
+
+	up_write(&ep->sem);
+
+eexit_3:
+	fput(tfile);
+eexit_2:
+	fput(file);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
+		     current, epfd, op, fd, event, error));
+
+	return error;
+}
+
+
+/*
+ * Implement the event wait interface for the eventpoll file. It is the kernel
+ * part of the user space epoll_wait(2).
+ */
+asmlinkage long sys_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
+			       int timeout)
+{
+	int error;
+	struct file *file;
+	struct eventpoll *ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
+		     current, epfd, events, maxevents, timeout));
+
+	/* The maximum number of events must be greater than zero */
+	if (maxevents <= 0)
+		return -EINVAL;
+
+	/* Verify that the area passed by the user is writeable */
+	if ((error = verify_area(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))))
+		goto eexit_1;
+
+	/* Get the "struct file *" for the eventpoll file */
+	error = -EBADF;
+	file = fget(epfd);
+	if (!file)
+		goto eexit_1;
+
+	/*
+	 * We have to check that the file structure underneath the file descriptor
+	 * the user passed to us _is_ an eventpoll file.
+	 */
+	error = -EINVAL;
+	if (!IS_FILE_EPOLL(file))
+		goto eexit_2;
+
+	/*
+	 * At this point it is safe to assume that the "private_data" contains
+	 * our own data structure.
+	 */
+	ep = file->private_data;
+
+	/* Time to fish for events ... */
+	error = ep_poll(ep, events, maxevents, timeout);
+
+eexit_2:
+	fput(file);
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
+		     current, epfd, events, maxevents, timeout, error));
+
+	return error;
+}
+
+
+/*
+ * Creates the file descriptor to be used by the epoll interface.
+ */
+static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
+{
+	struct qstr this;
+	char name[32];
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *file;
+	int error, fd;
+
+	/* Get a ready to use file */
+	error = -ENFILE;
+	file = get_empty_filp();
+	if (!file)
+		goto eexit_1;
+
+	/* Allocates an inode from the eventpoll file system */
+	inode = ep_eventpoll_inode();
+	error = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto eexit_2;
+
+	/* Allocates a free descriptor to plug the file onto */
+	error = get_unused_fd();
+	if (error < 0)
+		goto eexit_3;
+	fd = error;
+
+	/*
+	 * Link the inode to a directory entry by creating a unique name
+	 * using the inode number.
+	 */
+	error = -ENOMEM;
+	sprintf(name, "[%lu]", inode->i_ino);
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = inode->i_ino;
+	dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
+	if (!dentry)
+		goto eexit_4;
+	dentry->d_op = &eventpollfs_dentry_operations;
+	d_add(dentry, inode);
+	file->f_vfsmnt = mntget(eventpoll_mnt);
+	file->f_dentry = dget(dentry);
+
+	file->f_pos = 0;
+	file->f_flags = O_RDONLY;
+	file->f_op = &eventpoll_fops;
+	file->f_mode = FMODE_READ;
+	file->f_version = 0;
+	file->private_data = NULL;
+
+	/* Install the new setup file into the allocated fd. */
+	fd_install(fd, file);
+
+	*efd = fd;
+	*einode = inode;
+	*efile = file;
+	return 0;
+
+eexit_4:
+	put_unused_fd(fd);
+eexit_3:
+	iput(inode);
+eexit_2:
+	put_filp(file);
+eexit_1:
+	return error;
+}
+
+
+static int ep_alloc_pages(char **pages, int numpages)
+{
+	int i;
+
+	for (i = 0; i < numpages; i++) {
+		pages[i] = (char *) __get_free_pages(GFP_KERNEL, 0);
+		if (!pages[i]) {
+			for (--i; i >= 0; i--) {
+				ClearPageReserved(virt_to_page(pages[i]));
+				free_pages((unsigned long) pages[i], 0);
+			}
+			return -ENOMEM;
+		}
+		SetPageReserved(virt_to_page(pages[i]));
+	}
+	return 0;
+}
+
+
+static int ep_free_pages(char **pages, int numpages)
+{
+	int i;
+
+	for (i = 0; i < numpages; i++) {
+		ClearPageReserved(virt_to_page(pages[i]));
+		free_pages((unsigned long) pages[i], 0);
+	}
+	return 0;
+}
+
+
+static int ep_file_init(struct file *file, unsigned int hashbits)
+{
+	int error;
+	struct eventpoll *ep;
+
+	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
+		return -ENOMEM;
+
+	memset(ep, 0, sizeof(*ep));
+
+	error = ep_init(ep, hashbits);
+	if (error) {
+		kfree(ep);
+		return error;
+	}
+
+	file->private_data = ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_file_init() ep=%p\n",
+		     current, ep));
+	return 0;
+}
+
+
+/*
+ * Calculate the index of the hash relative to "file".
+ */
+static unsigned int ep_hash_index(struct eventpoll *ep, struct file *file, int fd)
+{
+	unsigned long ptr = (unsigned long) file ^ (fd << ep->hashbits);
+
+	return (unsigned int) hash_ptr((void *) ptr, ep->hashbits);
+}
+
+
+/*
+ * Returns the hash entry ( struct list_head * ) of the passed index.
+ */
+static struct list_head *ep_hash_entry(struct eventpoll *ep, unsigned int index)
+{
+
+	return (struct list_head *) (ep->hpages[index / EP_HENTRY_X_PAGE] +
+				     (index % EP_HENTRY_X_PAGE) * sizeof(struct list_head));
+}
+
+
+static int ep_init(struct eventpoll *ep, unsigned int hashbits)
+{
+	int error;
+	unsigned int i, hsize;
+
+	rwlock_init(&ep->lock);
+	init_rwsem(&ep->sem);
+	init_waitqueue_head(&ep->wq);
+	init_waitqueue_head(&ep->poll_wait);
+	INIT_LIST_HEAD(&ep->rdllist);
+
+	/* Hash allocation and setup */
+	ep->hashbits = hashbits;
+	error = ep_alloc_pages(ep->hpages, EP_HASH_PAGES(ep->hashbits));
+	if (error)
+		goto eexit_1;
+
+	/* Initialize hash buckets */
+	for (i = 0, hsize = 1 << hashbits; i < hsize; i++)
+		INIT_LIST_HEAD(ep_hash_entry(ep, i));
+
+	return 0;
+eexit_1:
+	return error;
+}
+
+
+static void ep_free(struct eventpoll *ep)
+{
+	unsigned int i, hsize;
+	struct list_head *lsthead, *lnk;
+	struct epitem *epi;
+
+	/* We need to release all tasks waiting for this file */
+	if (waitqueue_active(&ep->poll_wait))
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	/*
+	 * We need to lock this because we could be hit by
+	 * eventpoll_release() while we're freeing the "struct eventpoll".
+	 * We do not need to hold "ep->sem" here because the epoll file
+	 * is on the way to be removed and no one has references to it
+	 * anymore. The only hit might come from eventpoll_release() but
+	 * holding "epsem" is sufficient here.
+	 */
+	down(&epsem);
+
+	/*
+	 * Walks through the whole hash by unregistering poll callbacks.
+	 */
+	for (i = 0, hsize = 1 << ep->hashbits; i < hsize; i++) {
+		lsthead = ep_hash_entry(ep, i);
+
+		list_for_each(lnk, lsthead) {
+			epi = list_entry(lnk, struct epitem, llink);
+
+			ep_unregister_pollwait(ep, epi);
+		}
+	}
+
+	/*
+	 * Walks through the whole hash by freeing each "struct epitem". At this
+	 * point we are sure no poll callbacks will be lingering around, and also by
+	 * write-holding "sem" we can be sure that no file cleanup code will hit
+	 * us during this operation. So we can avoid the lock on "ep->lock".
+	 */
+	for (i = 0, hsize = 1 << ep->hashbits; i < hsize; i++) {
+		lsthead = ep_hash_entry(ep, i);
+
+		while (!list_empty(lsthead)) {
+			epi = list_entry(lsthead->next, struct epitem, llink);
+
+			ep_remove(ep, epi);
+		}
+	}
+
+	up(&epsem);
+
+	/* Free hash pages */
+	ep_free_pages(ep->hpages, EP_HASH_PAGES(ep->hashbits));
+}
+
+
+/*
+ * Search the file inside the eventpoll hash. It adds a usage count to
+ * the returned item, so the caller must call ep_release_epitem()
+ * when finished using the "struct epitem".
+ */
+static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
+{
+	unsigned long flags;
+	struct list_head *lsthead, *lnk;
+	struct epitem *epi = NULL;
+
+	read_lock_irqsave(&ep->lock, flags);
+
+	lsthead = ep_hash_entry(ep, ep_hash_index(ep, file, fd));
+	list_for_each(lnk, lsthead) {
+		epi = list_entry(lnk, struct epitem, llink);
+
+		if (epi->file == file && epi->fd == fd) {
+			ep_use_epitem(epi);
+			break;
+		}
+		epi = NULL;
+	}
+
+	read_unlock_irqrestore(&ep->lock, flags);
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
+		     current, file, epi));
+
+	return epi;
+}
+
+
+/*
+ * Increment the usage count of the "struct epitem", making sure
+ * that the user will have a valid pointer to reference.
+ */
+static void ep_use_epitem(struct epitem *epi)
+{
+
+	atomic_inc(&epi->usecnt);
+}
+
+
+/*
+ * Decrement ( release ) the usage count by signaling that the user
+ * has finished using the structure. It might lead to freeing the
+ * structure itself if the count goes to zero.
+ */
+static void ep_release_epitem(struct epitem *epi)
+{
+
+	if (atomic_dec_and_test(&epi->usecnt))
+		EPI_MEM_FREE(epi);
+}
+
+
+/*
+ * This is the callback that is used to add our wait queue to the
+ * target file wakeup lists.
+ */
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+				 poll_table *pt)
+{
+	struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt);
+	struct eppoll_entry *pwq;
+
+	if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) {
+		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
+		pwq->whead = whead;
+		pwq->base = epi;
+		add_wait_queue(whead, &pwq->wait);
+		list_add_tail(&pwq->llink, &epi->pwqlist);
+		epi->nwait++;
+	} else {
+		/* We have to signal that an error occurred */
+		epi->nwait = -1;
+	}
+}
+
+
+static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+		     struct file *tfile, int fd)
+{
+	int error, revents, pwake = 0;
+	unsigned long flags;
+	struct epitem *epi;
+	struct ep_pqueue epq;
+
+	error = -ENOMEM;
+	if (!(epi = EPI_MEM_ALLOC()))
+		goto eexit_1;
+
+	/* Item initialization follows here ... */
+	INIT_LIST_HEAD(&epi->llink);
+	INIT_LIST_HEAD(&epi->rdllink);
+	INIT_LIST_HEAD(&epi->fllink);
+	INIT_LIST_HEAD(&epi->txlink);
+	INIT_LIST_HEAD(&epi->pwqlist);
+	epi->ep = ep;
+	epi->file = tfile;
+	epi->fd = fd;
+	epi->event = *event;
+	atomic_set(&epi->usecnt, 1);
+	epi->nwait = 0;
+
+	/* Initialize the poll table using the queue callback */
+	epq.epi = epi;
+	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+
+	/*
+	 * Attach the item to the poll hooks and get current event bits.
+	 * We can safely use the file* here because its usage count has
+	 * been increased by the caller of this function.
+	 */
+	revents = tfile->f_op->poll(tfile, &epq.pt);
+
+	/*
+	 * We have to check if something went wrong during the poll wait queue
+	 * install process. Namely an allocation for a wait queue failed due
+	 * to high memory pressure.
+	 */
+	if (epi->nwait < 0)
+		goto eexit_2;
+
+	/* Add the current item to the list of active epoll hooks for this file */
+	spin_lock(&tfile->f_ep_lock);
+	list_add_tail(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_ep_lock);
+
+	/* We have to drop the new item inside our item list to keep track of it */
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Add the current item to the hash table */
+	list_add(&epi->llink, ep_hash_entry(ep, ep_hash_index(ep, tfile, fd)));
+
+	/* If the file is already "ready" we drop it inside the ready list */
+	if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) {
+		list_add_tail(&epi->rdllink, &ep->rdllist);
+
+		/* Notify waiting tasks that events are available */
+		if (waitqueue_active(&ep->wq))
+			wake_up(&ep->wq);
+		if (waitqueue_active(&ep->poll_wait))
+			pwake++;
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
+		     current, ep, tfile, fd));
+
+	return 0;
+
+eexit_2:
+	ep_unregister_pollwait(ep, epi);
+
+	/*
+	 * We need to do this because an event could have arrived on some
+	 * allocated wait queue.
+	 */
+	write_lock_irqsave(&ep->lock, flags);
+	if (EP_IS_LINKED(&epi->rdllink))
+		EP_LIST_DEL(&epi->rdllink);
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	EPI_MEM_FREE(epi);
+eexit_1:
+	return error;
+}
+
+
+/*
+ * Modify the interest event mask by dropping an event if the new mask
+ * has a match in the current file status.
+ */
+static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
+{
+	int pwake = 0;
+	unsigned int revents;
+	unsigned long flags;
+
+	/*
+	 * Set the new event interest mask before calling f_op->poll(), otherwise
+	 * a potential race might occur. In fact if we do this operation inside
+	 * the lock, an event might happen between the f_op->poll() call and the
+	 * new event set registering.
+	 */
+	epi->event.events = event->events;
+
+	/*
+	 * Get current event bits. We can safely use the file* here because
+	 * its usage count has been increased by the caller of this function.
+	 */
+	revents = epi->file->f_op->poll(epi->file, NULL);
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Copy the data member from inside the lock */
+	epi->event.data = event->data;
+
+	/*
+	 * If the item is not linked to the hash it means that it's on its
+	 * way toward the removal. Do nothing in this case.
+	 */
+	if (EP_IS_LINKED(&epi->llink)) {
+		/*
+		 * If the item is "hot" and it is not registered inside the ready
+		 * list, push it inside. If the item is not "hot" and it is currently
+		 * registered inside the ready list, unlink it.
+		 */
+		if (revents & event->events) {
+			if (!EP_IS_LINKED(&epi->rdllink)) {
+				list_add_tail(&epi->rdllink, &ep->rdllist);
+
+				/* Notify waiting tasks that events are available */
+				if (waitqueue_active(&ep->wq))
+					wake_up(&ep->wq);
+				if (waitqueue_active(&ep->poll_wait))
+					pwake++;
+			}
+		} else if (EP_IS_LINKED(&epi->rdllink))
+			EP_LIST_DEL(&epi->rdllink);
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	return 0;
+}
+
+
+/*
+ * This function unregisters poll callbacks from the associated file descriptor.
+ * Since this must be called without holding "ep->lock", the atomic exchange trick
+ * protects us from multiple unregistrations.
+ */
+static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
+{
+	int nwait;
+	struct list_head *lsthead = &epi->pwqlist;
+	struct eppoll_entry *pwq;
+
+	/* This is called without locks, so we need the atomic exchange */
+	nwait = xchg(&epi->nwait, 0);
+
+	if (nwait) {
+		while (!list_empty(lsthead)) {
+			pwq = list_entry(lsthead->next, struct eppoll_entry, llink);
+
+			EP_LIST_DEL(&pwq->llink);
+			remove_wait_queue(pwq->whead, &pwq->wait);
+			PWQ_MEM_FREE(pwq);
+		}
+	}
+}
+
+
+/*
+ * Unlink the "struct epitem" from all places it might have been hooked up.
+ * This function must be called with write IRQ lock on "ep->lock".
+ */
+static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
+{
+	int error;
+
+	/*
+	 * It can happen that this one is called for an item already unlinked.
+	 * The check protects us from doing a double unlink ( crash ).
+	 */
+	error = -ENOENT;
+	if (!EP_IS_LINKED(&epi->llink))
+		goto eexit_1;
+
+	/*
+	 * Clear the event mask for the unlinked item. This avoids item
+	 * notifications being sent after the unlink operation from inside
+	 * the kernel->userspace event transfer loop.
+	 */
+	epi->event.events = 0;
+
+	/*
+	 * At this point it is safe to do the job: unlink the item from our list.
+	 * This operation together with the above check closes the door to
+	 * double unlinks.
+	 */
+	EP_LIST_DEL(&epi->llink);
+
+	/*
+	 * If the item we are going to remove is inside the ready list,
+	 * we want to remove it from that list as well to avoid stale events.
+	 */
+	if (EP_IS_LINKED(&epi->rdllink))
+		EP_LIST_DEL(&epi->rdllink);
+
+	error = 0;
+eexit_1:
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
+		     current, ep, epi->file, error));
+
+	return error;
+}
+
+
+/*
+ * Removes a "struct epitem" from the eventpoll hash and deallocates
+ * all the associated resources.
+ */
+static int ep_remove(struct eventpoll *ep, struct epitem *epi)
+{
+	int error;
+	unsigned long flags;
+	struct file *file = epi->file;
+
+	/*
+	 * Removes poll wait queue hooks. We _have_ to do this without holding
+	 * the "ep->lock" otherwise a deadlock might occur. This is because of
+	 * the sequence of the lock acquisition. Here we take "ep->lock" then the
+	 * wait queue head lock when unregistering the wait queue. The wakeup
+	 * callback runs holding the wait queue head lock and calls our callback,
+	 * which will try to get "ep->lock".
+	 */
+	ep_unregister_pollwait(ep, epi);
+
+	/* Remove the current item from the list of epoll hooks */
+	spin_lock(&file->f_ep_lock);
+	if (EP_IS_LINKED(&epi->fllink))
+		EP_LIST_DEL(&epi->fllink);
+	spin_unlock(&file->f_ep_lock);
+
+	/* We need to acquire the write IRQ lock before calling ep_unlink() */
+	write_lock_irqsave(&ep->lock, flags);
+
+	/* Really unlink the item from the hash */
+	error = ep_unlink(ep, epi);
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	if (error)
+		goto eexit_1;
+
+	/* At this point it is safe to free the eventpoll item */
+	ep_release_epitem(epi);
+
+	error = 0;
+eexit_1:
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
+		     current, ep, file, error));
+
+	return error;
+}
+
+
+/*
+ * This is the callback that is passed to the wait queue wakeup
+ * mechanism. It is called by the stored file descriptors when they
+ * have events to report.
+ */
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync)
+{
+	int pwake = 0;
+	unsigned long flags;
+	struct epitem *epi = EP_ITEM_FROM_WAIT(wait);
+	struct eventpoll *ep = epi->ep;
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
+		     current, epi->file, epi, ep));
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	/*
+	 * If the event mask does not contain any poll(2) event, we consider the
+	 * descriptor to be disabled. This condition is likely the effect of the
+	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
+	 * until the next EPOLL_CTL_MOD is issued.
+	 */
+	if (!(epi->event.events & ~EP_PRIVATE_BITS))
+		goto is_disabled;
+
+	/* If this file is already in the ready list we exit soon */
+	if (EP_IS_LINKED(&epi->rdllink))
+		goto is_linked;
+
+	list_add_tail(&epi->rdllink, &ep->rdllist);
+
+is_linked:
+	/*
+	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
+	 * wait list.
+	 */
+	if (waitqueue_active(&ep->wq))
+		wake_up(&ep->wq);
+	if (waitqueue_active(&ep->poll_wait))
+		pwake++;
+
+is_disabled:
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+
+	return 1;
+}
+
+
+static int ep_eventpoll_close(struct inode *inode, struct file *file)
+{
+	struct eventpoll *ep = file->private_data;
+
+	if (ep) {
+		ep_free(ep);
+		kfree(ep);
+	}
+
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
+	return 0;
+}
+
+
+static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+{
+	unsigned int pollflags = 0;
+	unsigned long flags;
+	struct eventpoll *ep = file->private_data;
+
+	/* Insert inside our poll wait queue */
+	poll_wait(file, &ep->poll_wait, wait);
+
+	/* Check our condition */
+	read_lock_irqsave(&ep->lock, flags);
+	if (!list_empty(&ep->rdllist))
+		pollflags = POLLIN | POLLRDNORM;
+	read_unlock_irqrestore(&ep->lock, flags);
+
+	return pollflags;
+}
+
+
+/*
+ * Since we have to release the lock during the __copy_to_user() operation and
+ * during the f_op->poll() call, we try to collect the maximum number of items
+ * per pass, reducing the irqlock/irqunlock switching rate.
+ */
+static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
+{
+	int nepi;
+	unsigned long flags;
+	struct list_head *lsthead = &ep->rdllist, *lnk;
+	struct epitem *epi;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	for (nepi = 0, lnk = lsthead->next; lnk != lsthead && nepi < maxevents;) {
+		epi = list_entry(lnk, struct epitem, rdllink);
+
+		lnk = lnk->next;
+
+		/* Grab the item only if it is not already inside the transfer list */
+		if (!EP_IS_LINKED(&epi->txlink)) {
+			/*
+			 * This is initialized in this way so that the default
+			 * behaviour of the reinjecting code will be to push the
+			 * item back into the ready list.
+			 */
+			epi->revents = epi->event.events;
+
+			/* Link the ready item into the transfer list */
+			list_add(&epi->txlink, txlist);
+			nepi++;
+
+			/*
+			 * Unlink the item from the ready list.
+			 */
+			EP_LIST_DEL(&epi->rdllink);
+		}
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	return nepi;
+}
+
+
+/*
+ * This function is called without holding the "ep->lock" since the call to
+ * __copy_to_user() might sleep, and also f_op->poll() might reenable IRQs
+ * because of the way poll() is traditionally implemented in Linux.
+ */
+static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
+			  struct epoll_event *events)
+{
+	int eventcnt = 0, eventbuf = 0;
+	unsigned int revents;
+	struct list_head *lnk;
+	struct epitem *epi;
+	struct epoll_event event[EP_MAX_BUF_EVENTS];
+
+	/*
+	 * We can loop without lock because this is a task private list.
+	 * The test done during the collection loop will guarantee us that
+	 * another task will not try to collect this file. Also, items
+	 * cannot vanish during the loop because we are holding "sem".
+	 */
+	list_for_each(lnk, txlist) {
+		epi = list_entry(lnk, struct epitem, txlink);
+
+		/*
+		 * Get the ready file event set. We can safely use the file
+		 * because we are holding the "sem" in read and this will
+		 * guarantee that both the file and the item will not vanish.
+		 */
+		revents = epi->file->f_op->poll(epi->file, NULL);
+
+		/*
+		 * Set the return event set for the current file descriptor.
+		 * Note that only the task that was successfully able to link
+		 * the item to its "txlist" will write this field.
+		 */
+		epi->revents = revents & epi->event.events;
+
+		if (epi->revents) {
+			event[eventbuf] = epi->event;
+			event[eventbuf].events &= revents;
+			eventbuf++;
+			if (eventbuf == EP_MAX_BUF_EVENTS) {
+				if (__copy_to_user(&events[eventcnt], event,
+						   eventbuf * sizeof(struct epoll_event)))
+					return -EFAULT;
+				eventcnt += eventbuf;
+				eventbuf = 0;
+			}
+			if (epi->event.events & EPOLLONESHOT)
+				epi->event.events &= EP_PRIVATE_BITS;
+		}
+	}
+
+	if (eventbuf) {
+		if (__copy_to_user(&events[eventcnt], event,
+				   eventbuf * sizeof(struct epoll_event)))
+			return -EFAULT;
+		eventcnt += eventbuf;
+	}
+
+	return eventcnt;
+}
+
+
+/*
+ * Walk through the transfer list we collected with ep_collect_ready_items()
+ * and, if 1) the item is still "alive" 2) its event set is not empty 3) it's
+ * not already linked, link it to the ready list. Same as above, we are holding
+ * "sem" so items cannot vanish underneath our nose.
+ */
+static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
+{
+	int ricnt = 0, pwake = 0;
+	unsigned long flags;
+	struct epitem *epi;
+
+	write_lock_irqsave(&ep->lock, flags);
+
+	while (!list_empty(txlist)) {
+		epi = list_entry(txlist->next, struct epitem, txlink);
+
+		/* Unlink the current item from the transfer list */
+		EP_LIST_DEL(&epi->txlink);
+
+		/*
+		 * If the item is no longer linked to the interest set, we don't
+		 * have to push it inside the ready list because the following
+		 * ep_release_epitem() is going to drop it. Also, if the current
+		 * item is set to have an Edge Triggered behaviour, we don't have
+		 * to push it back either.
+		 */
+		if (EP_IS_LINKED(&epi->llink) && !(epi->event.events & EPOLLET) &&
+		    (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) {
+			list_add_tail(&epi->rdllink, &ep->rdllist);
+			ricnt++;
+		}
+	}
+
+	if (ricnt) {
+		/*
+		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
+		 * wait list.
+		 */
+		if (waitqueue_active(&ep->wq))
+			wake_up(&ep->wq);
+		if (waitqueue_active(&ep->poll_wait))
+			pwake++;
+	}
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/* We have to call this outside the lock */
+	if (pwake)
+		ep_poll_safewake(&psw, &ep->poll_wait);
+}
+
+
+/*
+ * Perform the transfer of events to user space.
+ */
+static int ep_events_transfer(struct eventpoll *ep, struct epoll_event *events, int maxevents)
+{
+	int eventcnt = 0;
+	struct list_head txlist;
+
+	INIT_LIST_HEAD(&txlist);
+
+	/*
+	 * We need to lock this because we could be hit by
+	 * eventpoll_release() and epoll_ctl(EPOLL_CTL_DEL).
+	 */
+	down_read(&ep->sem);
+
+	/* Collect/extract ready items */
+	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
+		/* Build result set in userspace */
+		eventcnt = ep_send_events(ep, &txlist, events);
+
+		/* Reinject ready items into the ready list */
+		ep_reinject_items(ep, &txlist);
+	}
+
+	up_read(&ep->sem);
+
+	return eventcnt;
+}
+
+
+static int ep_poll(struct eventpoll *ep, struct epoll_event *events, int maxevents,
+		   long timeout)
+{
+	int res, eavail;
+	unsigned long flags;
+	long jtimeout;
+	wait_queue_t wait;
+
+	/*
+	 * Calculate the timeout by checking for the "infinite" value ( -1 )
+	 * and the overflow condition. The passed timeout is in milliseconds,
+	 * that is why (t * HZ) / 1000.
+	 */
+	jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ?
+		MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000;
+
+retry:
+	write_lock_irqsave(&ep->lock, flags);
+
+	res = 0;
+	if (list_empty(&ep->rdllist)) {
+		/*
+		 * We don't have any available event to return to the caller.
+		 * We need to sleep here, and we will be woken up by
+		 * ep_poll_callback() when events become available.
+		 */
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&ep->wq, &wait);
+
+		for (;;) {
+			/*
+			 * We don't want to sleep if the ep_poll_callback() sends us
+			 * a wakeup in between. That's why we set the task state
+			 * to TASK_INTERRUPTIBLE before doing the checks.
+			 */
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (!list_empty(&ep->rdllist) || !jtimeout)
+				break;
+			if (signal_pending(current)) {
+				res = -EINTR;
+				break;
+			}
+
+			write_unlock_irqrestore(&ep->lock, flags);
+			jtimeout = schedule_timeout(jtimeout);
+			write_lock_irqsave(&ep->lock, flags);
+		}
+		remove_wait_queue(&ep->wq, &wait);
+
+		set_current_state(TASK_RUNNING);
+	}
+
+	/* Is it worth trying to dig for events ? */
+	eavail = !list_empty(&ep->rdllist);
+
+	write_unlock_irqrestore(&ep->lock, flags);
+
+	/*
+	 * Try to transfer events to user space. In case we get 0 events and
+	 * there's still timeout left over, we go and try again in search of
+	 * more luck.
+	 */
+	if (!res && eavail &&
+	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
+		goto retry;
+
+	return res;
+}
+
+
+static int eventpollfs_statfs(struct super_block *sb, struct statfs *buf)
+{
+
+	buf->f_type = EVENTPOLLFS_MAGIC;
+	buf->f_bsize = 1024;
+	buf->f_namelen = 255;
+	return 0;
+}
+
+
+static struct super_block *eventpollfs_read_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode *root = new_inode(sb);
+	if (!root)
+		return NULL;
+	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+	root->i_uid = root->i_gid = 0;
+	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+	sb->s_blocksize = 1024;
+	sb->s_blocksize_bits = 10;
+	sb->s_magic = EVENTPOLLFS_MAGIC;
+	sb->s_op = &eventpollfs_ops;
+	sb->s_root = d_alloc(NULL, &(const struct qstr) { "eventpoll:", 10, 0 });
+	if (!sb->s_root) {
+		iput(root);
+		return NULL;
+	}
+	sb->s_root->d_sb = sb;
+	sb->s_root->d_parent = sb->s_root;
+	d_instantiate(sb->s_root, root);
+	return sb;
+}
+
+
+static int eventpollfs_delete_dentry(struct dentry *dentry)
+{
+
+	return 1;
+}
+
+
+static struct inode *ep_eventpoll_inode(void)
+{
+	int error = -ENOMEM;
+	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
+
+	if (!inode)
+		goto eexit_1;
+
+	inode->i_fop = &eventpoll_fops;
+
+	/*
+	 * Mark the inode dirty from the very beginning,
+	 * that way it will never be moved to the dirty
+	 * list because mark_inode_dirty() will think
+	 * that it already _is_ on the dirty list.
+	 */
+	inode->i_state = I_DIRTY;
+	inode->i_mode = S_IRUSR | S_IWUSR;
+	inode->i_uid = current->fsuid;
+	inode->i_gid = current->fsgid;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_blksize = PAGE_SIZE;
+	return inode;
+
+eexit_1:
+	return ERR_PTR(error);
+}
+
+
+static int __init eventpoll_init(void)
+{
+	int error;
+
+	init_MUTEX(&epsem);
+
+	/* Initialize the structure used to perform safe poll wait head wake ups */
+	ep_poll_safewake_init(&psw);
+
+	/* Allocates the slab cache used to allocate "struct epitem" items */
+	error = -ENOMEM;
+	epi_cache = kmem_cache_create("eventpoll epi",
+				      sizeof(struct epitem),
+				      0,
+				      SLAB_HWCACHE_ALIGN | EPI_SLAB_DEBUG, NULL, NULL);
+	if (!epi_cache)
+		goto eexit_1;
+
+	/* Allocates the slab cache used to allocate "struct eppoll_entry" */
+	error = -ENOMEM;
+	pwq_cache = kmem_cache_create("eventpoll pwq",
+				      sizeof(struct eppoll_entry),
+				      0,
+				      EPI_SLAB_DEBUG, NULL, NULL);
+	if (!pwq_cache)
+		goto eexit_2;
+
+	/*
+	 * Register the virtual file system that will be the source of inodes
+	 * for the eventpoll files
+	 */
+	error = register_filesystem(&eventpoll_fs_type);
+	if (error)
+		goto eexit_3;
+
+	/* Mount the virtual file system registered above */
+	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
+	error = PTR_ERR(eventpoll_mnt);
+	if (IS_ERR(eventpoll_mnt))
+		goto eexit_4;
+
+	return 0;
+
+eexit_4:
+	unregister_filesystem(&eventpoll_fs_type);
+eexit_3:
+	kmem_cache_destroy(pwq_cache);
+eexit_2:
+	kmem_cache_destroy(epi_cache);
+eexit_1:
+
+	return error;
+}
+
+
+static void __exit eventpoll_exit(void)
+{
+	/* Undo all operations done inside eventpoll_init() */
+	unregister_filesystem(&eventpoll_fs_type);
+	mntput(eventpoll_mnt);
+	kmem_cache_destroy(pwq_cache);
+	kmem_cache_destroy(epi_cache);
+}
+
+module_init(eventpoll_init);
+module_exit(eventpoll_exit);
+
+MODULE_LICENSE("GPL");
+
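[ Editor's note, not part of the patch: a quick way to exercise the three
  syscalls implemented above. This test program assumes the i386 syscall
  numbers the patch assigns (254/255/256) and a 2.4-era libc without epoll
  wrappers, so it issues the calls through syscall(2); the local wrapper
  names and the struct layout mirror the patch's eventpoll.h. ]

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>

	#define EPOLL_CTL_ADD	1
	#define EPOLLIN		0x001	/* same bit as POLLIN */

	/* layout must match the kernel's struct epoll_event */
	struct epoll_event {
		uint32_t events;
		uint64_t data;
	} __attribute__((packed));

	/* i386 syscall numbers as wired up by this patch */
	static int epoll_create(int size)
	{ return syscall(254, size); }			/* __NR_epoll_create */
	static int epoll_ctl(int epfd, int op, int fd, struct epoll_event *ev)
	{ return syscall(255, epfd, op, fd, ev); }	/* __NR_epoll_ctl */
	static int epoll_wait(int epfd, struct epoll_event *evs, int max, int tmo)
	{ return syscall(256, epfd, evs, max, tmo); }	/* __NR_epoll_wait */

	int main(void)
	{
		struct epoll_event ev = { EPOLLIN, 0 };
		int n, epfd = epoll_create(16);	/* size is only a sizing hint */

		if (epfd < 0)
			return 1;
		epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev);	/* watch stdin */
		n = epoll_wait(epfd, &ev, 1, 5000);	/* block up to 5000 ms */
		printf("epoll_wait() = %d\n", n);
		return 0;
	}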
diff -Nru linux-2.4/fs/file_table.c linux-2.4.epoll/fs/file_table.c
--- linux-2.4/fs/file_table.c	2003-12-29 11:26:11.579078936 -0800
+++ linux-2.4.epoll/fs/file_table.c	2003-12-29 11:28:20.948411808 -0800
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 /* sysctl tunables... */
 struct files_stat_struct files_stat = {0, 0, NR_FILE};
@@ -42,6 +43,7 @@
 		files_stat.nr_free_files--;
 	new_one:
 		memset(f, 0, sizeof(*f));
+		eventpoll_init_file(f);
 		atomic_set(&f->f_count,1);
 		f->f_version = ++event;
 		f->f_uid = current->fsuid;
@@ -85,6 +87,7 @@
 int init_private_file(struct file *filp, struct dentry *dentry, int mode)
 {
 	memset(filp, 0, sizeof(*filp));
+	eventpoll_init_file(filp);
 	filp->f_mode = mode;
 	atomic_set(&filp->f_count, 1);
 	filp->f_dentry = dentry;
@@ -104,6 +107,11 @@
 	struct inode * inode = dentry->d_inode;
 
 	if (atomic_dec_and_test(&file->f_count)) {
+		/*
+		 * The function eventpoll_release() should be the first
+		 * function called in the file cleanup chain.
+		 */
+		eventpoll_release(file);
 		locks_remove_flock(file);
 
 		if (file->f_iobuf)
diff -Nru linux-2.4/fs/Makefile linux-2.4.epoll/fs/Makefile
--- linux-2.4/fs/Makefile	2003-12-29 11:26:11.437100520 -0800
+++ linux-2.4.epoll/fs/Makefile	2003-12-29 11:28:20.948411808 -0800
@@ -14,7 +14,7 @@
 		super.o  block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \
 		fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
 		dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
-		filesystems.o namespace.o seq_file.o xattr.o quota.o
+		filesystems.o namespace.o seq_file.o xattr.o quota.o eventpoll.o
 
 obj-$(CONFIG_QUOTA) += dquot.o quota_v1.o
 obj-$(CONFIG_QFMT_V2) += quota_v2.o
diff -Nru linux-2.4/fs/ncpfs/sock.c linux-2.4.epoll/fs/ncpfs/sock.c
--- linux-2.4/fs/ncpfs/sock.c	2003-12-29 11:26:14.287667168 -0800
+++ linux-2.4.epoll/fs/ncpfs/sock.c	2003-12-29 11:28:20.990405424 -0800
@@ -86,7 +86,7 @@
 	struct socket *sock;
 	int result;
 	char *start = server->packet;
-	poll_table wait_table;
+	struct poll_wqueues wait_table;
 	int init_timeout, max_timeout;
 	int timeout;
 	int retrans;
@@ -140,7 +140,7 @@
 	   instructions adding the wait_table waitqueues in the waitqueue-head
 	   before going to calculate the mask-retval. */
 	__set_current_state(TASK_INTERRUPTIBLE);
-	if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
+	if (!(sock->ops->poll(file, sock, &wait_table.pt) & POLLIN)) {
 		int timed_out;
 		if (timeout > max_timeout) {
 			/* JEJB/JSP 2/7/94
@@ -261,7 +261,7 @@
 }
 
 static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) {
-	poll_table wait_table;
+	struct poll_wqueues wait_table;
 	struct file *file;
 	struct socket *sock;
 	int init_timeout;
@@ -285,7 +285,7 @@
 	   instructions adding the wait_table waitqueues in the waitqueue-head
 	   before going to calculate the mask-retval. */
 	__set_current_state(TASK_INTERRUPTIBLE);
-	if (!(sock->ops->poll(file, sock, &wait_table) & POLLIN)) {
+	if (!(sock->ops->poll(file, sock, &wait_table.pt) & POLLIN)) {
 		init_timeout = schedule_timeout(init_timeout);
 		poll_freewait(&wait_table);
 		current->state = TASK_RUNNING;
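[ Editor's note, not part of the patch: the select.c rework below splits the
  old on-stack poll_table into a struct poll_wqueues that embeds a minimal,
  callback-only poll_table. Callers now hand f_op->poll() just &table.pt, and
  __pollwait() recovers the outer structure with container_of(). A stand-alone
  userland sketch of that recovery trick follows; the names are abbreviated
  stand-ins, not the kernel definitions. ]

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct poll_table_struct { int dummy; };	/* stands in for poll_table */

	struct poll_wqueues {
		struct poll_table_struct pt;	/* what f_op->poll() sees */
		int error;			/* private bookkeeping around it */
		void *table;
	};

	static void pollwait(struct poll_table_struct *_p)
	{
		/* given only the embedded poll_table, recover the container */
		struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);

		p->error = 42;
	}

	int main(void)
	{
		struct poll_wqueues wq = { { 0 }, 0, NULL };

		pollwait(&wq.pt);	/* callers hand out only &wq.pt */
		printf("error = %d\n", wq.error);
		return 0;
	}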
*/ +void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p); -void poll_freewait(poll_table* pt) +void poll_initwait(struct poll_wqueues *pwq) { - struct poll_table_page * p = pt->table; + init_poll_funcptr(&pwq->pt, __pollwait); + pwq->error = 0; + pwq->table = NULL; +} + +void poll_freewait(struct poll_wqueues *pwq) +{ + struct poll_table_page * p = pwq->table; while (p) { struct poll_table_entry * entry; struct poll_table_page *old; @@ -72,8 +81,9 @@ } } -void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) +void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *_p) { + struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt); struct poll_table_page *table = p->table; if (!table || POLL_TABLE_FULL(table)) { @@ -103,6 +113,7 @@ } } + #define __IN(fds, n) (fds->in + n) #define __OUT(fds, n) (fds->out + n) #define __EX(fds, n) (fds->ex + n) @@ -163,7 +174,8 @@ int do_select(int n, fd_set_bits *fds, long *timeout) { - poll_table table, *wait; + struct poll_wqueues table; + poll_table *wait; int retval, i, off; long __timeout = *timeout; @@ -176,7 +188,7 @@ n = retval; poll_initwait(&table); - wait = &table; + wait = &table.pt; if (!__timeout) wait = NULL; retval = 0; @@ -383,10 +395,10 @@ } static int do_poll(unsigned int nfds, unsigned int nchunks, unsigned int nleft, - struct pollfd *fds[], poll_table *wait, long timeout) + struct pollfd *fds[], struct poll_wqueues *wait, long timeout) { int count; - poll_table* pt = wait; + poll_table* pt = &wait->pt; for (;;) { unsigned int i; @@ -413,7 +425,7 @@ { int i, j, fdcount, err; struct pollfd **fds; - poll_table table, *wait; + struct poll_wqueues table, *wait; int nchunks, nleft; /* Do a sanity check on nfds ... 
 */
diff -Nru linux-2.4/fs/smbfs/sock.c linux-2.4.epoll/fs/smbfs/sock.c
--- linux-2.4/fs/smbfs/sock.c	2003-12-29 11:26:15.368502856 -0800
+++ linux-2.4.epoll/fs/smbfs/sock.c	2003-12-29 11:28:20.991405272 -0800
@@ -314,7 +314,7 @@
 smb_receive_poll(struct smb_sb_info *server)
 {
 	struct file *file = server->sock_file;
-	poll_table wait_table;
+	struct poll_wqueues wait_table;
 	int result = 0;
 	int timeout = server->mnt->timeo * HZ;
 	int mask;
@@ -323,7 +323,7 @@
 
 	poll_initwait(&wait_table);
 	set_current_state(TASK_INTERRUPTIBLE);
-	mask = file->f_op->poll(file, &wait_table);
+	mask = file->f_op->poll(file, &wait_table.pt);
 	if (mask & POLLIN) {
 		poll_freewait(&wait_table);
 		current->state = TASK_RUNNING;
diff -Nru linux-2.4/include/asm-i386/unistd.h linux-2.4.epoll/include/asm-i386/unistd.h
--- linux-2.4/include/asm-i386/unistd.h	2003-12-29 11:26:18.819978152 -0800
+++ linux-2.4.epoll/include/asm-i386/unistd.h	2003-12-29 11:28:21.033398888 -0800
@@ -257,6 +257,10 @@
 #define __NR_alloc_hugepages	250
 #define __NR_free_hugepages	251
 #define __NR_exit_group		252
+#define __NR_epoll_create	254
+#define __NR_epoll_ctl		255
+#define __NR_epoll_wait		256
+
 
 /* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
diff -Nru linux-2.4/include/asm-ia64/unistd.h linux-2.4.epoll/include/asm-ia64/unistd.h
--- linux-2.4/include/asm-ia64/unistd.h	2003-12-29 11:26:18.918963104 -0800
+++ linux-2.4.epoll/include/asm-ia64/unistd.h	2003-12-29 11:28:21.034398736 -0800
@@ -223,6 +223,9 @@
 #define __NR_security			1233
 /* 1234-1235: reserved for {alloc,free}_hugepages */
 /* 1238-1242: reserved for io_{setup,destroy,getevents,submit,cancel} */
+#define __NR_epoll_create		1243
+#define __NR_epoll_ctl			1244
+#define __NR_epoll_wait			1245
 #define __NR_semtimedop			1247
 
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
diff -Nru linux-2.4/include/linux/eventpoll.h linux-2.4.epoll/include/linux/eventpoll.h
--- linux-2.4/include/linux/eventpoll.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.epoll/include/linux/eventpoll.h	2003-12-29 11:28:28.310292632 -0800
@@ -0,0 +1,96 @@
+/*
+ *  include/linux/eventpoll.h ( Efficient event polling implementation )
+ *  Copyright (C) 2001,...,2003	 Davide Libenzi
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _LINUX_EVENTPOLL_H
+#define _LINUX_EVENTPOLL_H
+
+#include <linux/types.h>
+
+
+/* Valid opcodes to issue to sys_epoll_ctl() */
+#define EPOLL_CTL_ADD 1
+#define EPOLL_CTL_DEL 2
+#define EPOLL_CTL_MOD 3
+
+/* Set the One Shot behaviour for the target file descriptor */
+#define EPOLLONESHOT (1 << 30)
+
+/* Set the Edge Triggered behaviour for the target file descriptor */
+#define EPOLLET (1 << 31)
+
+/*
+ * On x86-64 and ia64 make the 64bit structure have the same alignment
+ * as the 32bit structure. This makes 32bit emulation easier.
+ */
+#if defined(__x86_64__)
+#define EPOLL_PACKED __attribute__((packed))
+#else
+#define EPOLL_PACKED
+#endif
+
+struct epoll_event {
+	__u32 events;
+	__u64 data;
+} EPOLL_PACKED;
+
+#ifdef __KERNEL__
+
+/* Forward declarations to avoid compiler errors */
+struct file;
+
+
+/* Kernel space functions implementing the user space "epoll" API */
+asmlinkage long sys_epoll_create(int size);
+asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event);
+asmlinkage long sys_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
+			       int timeout);
+
+/* Used to initialize the epoll bits inside the "struct file" */
+void eventpoll_init_file(struct file *file);
+
+/* Used to release the epoll bits inside the "struct file" */
+void eventpoll_release_file(struct file *file);
+
+/*
+ * This is called from inside fs/file_table.c:__fput() to unlink files
+ * from the eventpoll interface. We need this facility to correctly
+ * clean up files that are closed without having been removed from the
+ * eventpoll interface.
+ */
+static inline void eventpoll_release(struct file *file)
+{
+
+	/*
+	 * Fast check to avoid the get/release of the semaphore. Since
+	 * we're doing this outside the semaphore lock, it might return
+	 * false negatives, but we don't care. It'll help in 99.99% of cases
+	 * to avoid the semaphore lock. False positives simply cannot happen
+	 * because the file is on its way to being removed and nobody (but
+	 * eventpoll) still holds a reference to it.
+	 */
+	if (likely(list_empty(&file->f_ep_links)))
+		return;
+
+	/*
+	 * The file is being closed while it is still linked to an epoll
+	 * descriptor. We need to handle this by correctly unlinking it
+	 * from its containers.
+	 */
+	eventpoll_release_file(file);
+}
+
+
+#endif /* #ifdef __KERNEL__ */
+
+#endif /* #ifndef _LINUX_EVENTPOLL_H */
+
diff -Nru linux-2.4/include/linux/fs.h linux-2.4.epoll/include/linux/fs.h
--- linux-2.4/include/linux/fs.h	2003-12-29 11:26:24.786071168 -0800
+++ linux-2.4.epoll/include/linux/fs.h	2003-12-29 11:28:21.077392200 -0800
@@ -584,6 +584,8 @@
 	/* preallocated helper kiobuf to speedup O_DIRECT */
 	struct kiobuf		*f_iobuf;
 	long			f_iobuf_lock;
+	struct list_head	f_ep_links;
+	spinlock_t		f_ep_lock;
 };
 extern spinlock_t files_lock;
 #define file_list_lock() spin_lock(&files_lock);
diff -Nru linux-2.4/include/linux/hash.h linux-2.4.epoll/include/linux/hash.h
--- linux-2.4/include/linux/hash.h	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.4.epoll/include/linux/hash.h	2003-12-29 11:28:21.077392200 -0800
@@ -0,0 +1,58 @@
+#ifndef _LINUX_HASH_H
+#define _LINUX_HASH_H
+/* Fast hashing routine for a long.
+   (C) 2002 William Lee Irwin III, IBM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is, operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+{
+	unsigned long hash = val;
+
+#if BITS_PER_LONG == 64
+	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+	unsigned long n = hash;
+	n <<= 18;
+	hash -= n;
+	n <<= 33;
+	hash -= n;
+	n <<= 3;
+	hash += n;
+	n <<= 3;
+	hash -= n;
+	n <<= 4;
+	hash += n;
+	n <<= 2;
+	hash += n;
+#else
+	/* On some cpus multiply is faster, on others gcc will do shifts */
+	hash *= GOLDEN_RATIO_PRIME;
+#endif
+
+	/* High bits are more random, so use them. */
+	return hash >> (BITS_PER_LONG - bits);
+}
+
+static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
+{
+	return hash_long((unsigned long)ptr, bits);
+}
+#endif /* _LINUX_HASH_H */
diff -Nru linux-2.4/include/linux/kernel.h linux-2.4.epoll/include/linux/kernel.h
--- linux-2.4/include/linux/kernel.h	2003-12-29 11:26:24.863059464 -0800
+++ linux-2.4.epoll/include/linux/kernel.h	2003-12-29 11:28:21.078392048 -0800
@@ -174,6 +174,18 @@
 extern void __out_of_line_bug(int line) ATTRIB_NORET;
 #define out_of_line_bug() __out_of_line_bug(__LINE__)
 
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr:	the pointer to the member.
+ * @type:	the type of the container struct this is embedded in.
+ * @member:	the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({			\
+        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+        (type *)( (char *)__mptr - offsetof(type,member) );})
+
 #endif /* __KERNEL__ */
 
 #define SI_LOAD_SHIFT	16
diff -Nru linux-2.4/include/linux/poll.h linux-2.4.epoll/include/linux/poll.h
--- linux-2.4/include/linux/poll.h	2003-12-29 11:26:25.077026936 -0800
+++ linux-2.4.epoll/include/linux/poll.h	2003-12-29 11:28:21.078392048 -0800
@@ -10,28 +10,39 @@
 #include
 #include
 
-struct poll_table_page;
+struct poll_table_struct;
+
+/*
+ * structures and helpers for f_op->poll implementations
+ */
+typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
 
 typedef struct poll_table_struct {
-	int error;
-	struct poll_table_page * table;
+	poll_queue_proc qproc;
 } poll_table;
 
-extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
-
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
 {
 	if (p && wait_address)
-		__pollwait(filp, wait_address, p);
+		p->qproc(filp, wait_address, p);
 }
 
-static inline void poll_initwait(poll_table* pt)
+static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 {
-	pt->error = 0;
-	pt->table = NULL;
+	pt->qproc = qproc;
 }
 
-extern void poll_freewait(poll_table* pt);
+/*
+ * Structures and helpers for sys_select/sys_poll
+ */
+struct poll_wqueues {
+	poll_table pt;
+	struct poll_table_page * table;
+	int error;
+};
+
+extern void poll_initwait(struct poll_wqueues *pwq);
+extern void poll_freewait(struct poll_wqueues *pwq);
 
 /*
  * Scaleable version of the fd_set.
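
The poll.h rework just above is the pivot of the patch: poll_table shrinks to a
single poll_queue_proc callback, while the old table and error fields move into
struct poll_wqueues, which select/poll keep using through poll_initwait() and
poll_freewait(). Any subsystem can now install its own queue callback, which is
how eventpoll hooks the wait queue of every monitored file once, instead of
rebuilding a poll table on every wait as select/poll must. A minimal sketch of
such a consumer follows; my_counter, my_qproc and my_poll_file are hypothetical
names used only for illustration, while poll_wait(), init_poll_funcptr() and
container_of() come from the patched headers:

	/* Count the wait queues a ->poll() handler would register, by
	 * installing a custom queue callback in place of __pollwait(). */
	struct my_counter {
		poll_table pt;		/* recovered via container_of() */
		int nr_queues;
	};

	static void my_qproc(struct file *filp, wait_queue_head_t *whead,
			     poll_table *pt)
	{
		struct my_counter *c = container_of(pt, struct my_counter, pt);

		/* A real consumer (e.g. eventpoll) would allocate a wait
		 * queue entry and add it to "whead" here. */
		c->nr_queues++;
	}

	static unsigned int my_poll_file(struct file *file)
	{
		struct my_counter c;

		c.nr_queues = 0;
		init_poll_funcptr(&c.pt, my_qproc);
		/* f_op->poll() calls poll_wait(), which dispatches to my_qproc */
		return file->f_op->poll(file, &c.pt);
	}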
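
The hash.h helper added earlier is presumably what the eventpoll code uses to
key its items by file pointer. A short hypothetical illustration, assuming
8-bit bucketing (EP_HASH_BITS and my_file_bucket are invented names):

	#define EP_HASH_BITS 8	/* 2^8 = 256 hash chains */

	/* Multiplying by GOLDEN_RATIO_PRIME spreads consecutive pointer
	 * values across buckets; hash_ptr() keeps the top "bits" bits
	 * because the high bits of the product are the best mixed. */
	static inline unsigned long my_file_bucket(struct file *file)
	{
		return hash_ptr(file, EP_HASH_BITS);	/* 0 ... 255 */
	}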
diff -Nru linux-2.4/include/linux/sched.h linux-2.4.epoll/include/linux/sched.h --- linux-2.4/include/linux/sched.h 2003-12-29 11:26:25.095024200 -0800 +++ linux-2.4.epoll/include/linux/sched.h 2003-12-29 11:28:21.120385664 -0800 @@ -135,6 +135,8 @@ extern spinlock_t runqueue_lock; extern spinlock_t mmlist_lock; +typedef struct task_struct task_t; + extern void sched_init(void); extern void init_idle(void); extern void show_state(void); diff -Nru linux-2.4/include/linux/wait.h linux-2.4.epoll/include/linux/wait.h --- linux-2.4/include/linux/wait.h 2003-12-29 11:26:25.144016752 -0800 +++ linux-2.4.epoll/include/linux/wait.h 2003-12-29 11:28:21.121385512 -0800 @@ -19,6 +19,9 @@ #include #include +typedef struct __wait_queue wait_queue_t; +typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned int, int); + /* * Debug control. Slow but useful. */ @@ -32,13 +35,13 @@ unsigned int flags; #define WQ_FLAG_EXCLUSIVE 0x01 struct task_struct * task; + wait_queue_func_t func; struct list_head task_list; #if WAITQUEUE_DEBUG long __magic; long __waker; #endif }; -typedef struct __wait_queue wait_queue_t; /* * 'dual' spinlock architecture. Can be switched between spinlock_t and @@ -138,6 +141,7 @@ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ task: tsk, \ + func: NULL, \ task_list: { NULL, NULL }, \ __WAITQUEUE_DEBUG_INIT(name)} @@ -174,10 +178,18 @@ #endif q->flags = 0; q->task = p; + q->func = NULL; #if WAITQUEUE_DEBUG q->__magic = (long)&q->__magic; #endif } +static inline void init_waitqueue_func_entry(wait_queue_t *q, + wait_queue_func_t func) +{ + q->flags = 0; + q->task = NULL; + q->func = func; +} static inline int waitqueue_active(wait_queue_head_t *q) { @@ -189,6 +201,22 @@ return !list_empty(&q->task_list); } +#define add_wait_queue_cond(q, wait, cond) \ + ({ \ + unsigned long flags; \ + int _raced = 0; \ + wq_write_lock_irqsave(&(q)->lock, flags); \ + (wait)->flags = 0; \ + __add_wait_queue((q), (wait)); \ + rmb(); \ + if (!(cond)) { \ + _raced = 1; \ + __remove_wait_queue((q), (wait)); \ + } \ + wq_write_unlock_irqrestore(&(q)->lock, flags); \ + _raced; \ + }) + static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) { diff -Nru linux-2.4/kernel/ksyms.c linux-2.4.epoll/kernel/ksyms.c --- linux-2.4/kernel/ksyms.c 2003-12-29 11:26:26.545803648 -0800 +++ linux-2.4.epoll/kernel/ksyms.c 2003-12-29 11:28:21.121385512 -0800 @@ -271,7 +271,7 @@ EXPORT_SYMBOL(generic_read_dir); EXPORT_SYMBOL(generic_file_llseek); EXPORT_SYMBOL(no_llseek); -EXPORT_SYMBOL(__pollwait); +EXPORT_SYMBOL(poll_initwait); EXPORT_SYMBOL(poll_freewait); EXPORT_SYMBOL(ROOT_DEV); EXPORT_SYMBOL(__find_get_page); diff -Nru linux-2.4/kernel/sched.c linux-2.4.epoll/kernel/sched.c --- linux-2.4/kernel/sched.c 2003-12-29 11:26:26.550802888 -0800 +++ linux-2.4.epoll/kernel/sched.c 2003-12-29 11:28:21.163379128 -0800 @@ -714,16 +714,24 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, int nr_exclusive, const int sync) { - struct list_head *tmp; + struct list_head *tmp, *next; struct task_struct *p; CHECK_MAGIC_WQHEAD(q); WQ_CHECK_LIST_HEAD(&q->task_list); - list_for_each(tmp,&q->task_list) { + list_for_each_safe(tmp, next, &q->task_list) { unsigned int state; + wait_queue_func_t func; wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); - + func = curr->func; + if (func) { + unsigned flags = curr->flags; + if (func(curr, mode, sync) && + (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + break; + continue; + } CHECK_MAGIC(curr->__magic); p = curr->task; state = 
p->state;
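

The wait.h and sched.c hunks above generalize wake-ups: a wait queue entry can
now carry a wait_queue_func_t callback, and __wake_up_common() invokes that
callback instead of making a task runnable. This is the hook that lets
eventpoll harvest ready events directly in the wake-up path. A hedged sketch of
a callback consumer; all my_* names are invented, while
init_waitqueue_func_entry() and the callback typedef come from the patch and
add_wait_queue()/wake_up() from the stock kernel:

	struct my_waiter {
		wait_queue_t wait;	/* embedded entry, found via container_of() */
		atomic_t hits;
	};

	static int my_wake_function(wait_queue_t *wait, unsigned int mode, int sync)
	{
		struct my_waiter *w = container_of(wait, struct my_waiter, wait);

		/* Called under the wait queue head's lock, possibly from
		 * interrupt context: only small, non-blocking work here. */
		atomic_inc(&w->hits);
		return 1;	/* non-zero counts toward nr_exclusive */
	}

	static void my_attach(wait_queue_head_t *whead, struct my_waiter *w)
	{
		atomic_set(&w->hits, 0);
		init_waitqueue_func_entry(&w->wait, my_wake_function);
		add_wait_queue(whead, &w->wait);
		/* from now on, wake_up(whead) calls my_wake_function() */
	}

Note that a callback's return value only terminates the wake-up walk early when
the entry was added with WQ_FLAG_EXCLUSIVE; for ordinary entries, every
callback on the queue still runs.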
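
The add_wait_queue_cond() macro from the wait.h hunk closes the classic
test-versus-queue race: the entry is added first, a read barrier is issued, and
only then is the condition re-checked, with the entry removed again (and 1
returned) if the condition no longer holds. A hypothetical caller, where
my_wait_unless_stopped is an invented name:

	/* Queue a waiter only while "stop" is still clear. If another CPU
	 * sets stop before we are queued, the macro undoes the add and
	 * returns 1 so the caller can bail out immediately. */
	static int my_wait_unless_stopped(wait_queue_head_t *whead,
					  wait_queue_t *wait, volatile int *stop)
	{
		if (add_wait_queue_cond(whead, wait, !*stop))
			return -1;	/* raced: condition changed under us */
		return 0;		/* queued; a later wake-up will find us */
	}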
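
Finally, the unistd.h and eventpoll.h hunks earlier in the patch define the
user-visible ABI. A hedged user-space sketch of the three calls follows; it
assumes a libc that already ships <sys/epoll.h> wrappers and the epoll_data
union. On a bare 2.4 toolchain the calls would instead be made via syscall()
with the __NR_epoll_* numbers added above:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/epoll.h>	/* assumed to exist; see note above */

	int main(void)
	{
		struct epoll_event ev, ready[16];
		int i, n, epfd;

		epfd = epoll_create(16);	/* "size" is only a hint */
		if (epfd < 0)
			return 1;

		ev.events = EPOLLIN;		/* level-triggered unless EPOLLET is set */
		ev.data.fd = STDIN_FILENO;	/* the 64-bit "data" is echoed back verbatim */
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
			return 1;

		n = epoll_wait(epfd, ready, 16, 5000);	/* timeout in milliseconds */
		for (i = 0; i < n; i++)
			printf("fd %d ready, events 0x%x\n",
			       ready[i].data.fd, ready[i].events);

		close(epfd);
		return 0;
	}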