@@ -3,30 +3,18 @@
#include
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
-
-enum {
- FD_READ = 1,
- FD_WRITE,
- FD_RDWR,
- FDTBL_MAXSIZE = 128
+struct thread_tag {
+ struct thread_tag *next;
+ spinlock_t l;
+ volatile int key;
+ void *data;
};
-struct fd_entry {
- QLock lock;
- int fd;
- int id;
- short rc;
- short wc;
- short ref;
-};
-
-static struct fd_entry fd_entry1 = { .fd -1 };
-static struct fd_entry *fd_table = nil;
-static spinlock_t fd_table_lock = { 0, 0, nil, 0 };
-static spinlock_t mlock = { 0, 0, nil, 0 };
-static spinlock_t dl_lock = { 0, 0, nil, 0 };
+static spinlock_t mlock;
+static spinlock_t dl_lock;
+static spinlock_t tag_lock;
+static struct thread_tag *thread_tag_store = nil;
+static uint nextkey = 0;
void
_thread_malloc_lock(void)
@@ -46,131 +34,106 @@ _thread_malloc_init(void)
}
/*
- * Must set errno on failure because the return value
- * of _thread_fd_entry is propagated back to the caller
- * of the thread-wrapped libc function.
+ * for ld.so
*/
-static struct fd_entry *
-_thread_fd_lookup(int fd)
+void
+_thread_dl_lock(int t)
{
- struct fd_entry *t;
- static int cursize;
- int newsize;
-
- if(fd >= FDTBL_MAXSIZE) {
- errno = EBADF;
- return nil;
- }
-
- /*
- * There are currently only a few libc functions using
- * _thread_fd_*, which are rarely called by P9P programs.
- * So the contention for these locks is very small and so
- * far have usually been limited to a single fd. So
- * rather than malloc the fd_table everytime we just use
- * a single fd_entry until a lock request for another fd
- * comes in.
- */
- if(fd_table == nil)
- if(fd_entry1.fd == -1) {
- fd_entry1.fd = fd;
- return &fd_entry1;
- } else if(fd_entry1.fd == fd)
- return &fd_entry1;
- else {
- cursize = MAX(fd_entry1.fd, 16);
- fd_table = malloc(cursize*sizeof(fd_table[0]));
- if(fd_table == nil) {
- errno = ENOMEM;
- return nil;
- }
- memset(fd_table, 0, cursize*sizeof(fd_table[0]));
- fd_table[fd_entry1.fd] = fd_entry1;
- }
- if(fd > cursize) {
- newsize = MIN(cursize*2, FDTBL_MAXSIZE);
- t = realloc(fd_table, newsize*sizeof(fd_table[0]));
- if(t == nil) {
- errno = ENOMEM;
- return nil;
- }
- fd_table = t;
- cursize = newsize;
- memset(fd_table, 0, cursize*sizeof(fd_table[0]));
- }
-
- return &fd_table[fd];
+ if(t)
+ _spinunlock(&dl_lock);
+ else
+ _spinlock(&dl_lock);
}
/*
- * Mutiple readers just share the lock by incrementing the read count.
- * Writers must obtain an exclusive lock.
+ * for libc
*/
-int
-_thread_fd_lock(int fd, int type, struct timespec *time)
+static void
+_thread_tag_init(void **tag)
{
- struct fd_entry *fde;
- int id;
-
- _spinlock(&fd_table_lock);
- fde = _thread_fd_lookup(fd);
- if(fde == nil)
- return -1;
-
- if(type == FD_READ) {
- if(fde->rc++ >= 1 && fde->wc == 0) {
- _spinunlock(&fd_table_lock);
- return 0;
+ struct thread_tag *t;
+
+ _spinlock(&tag_lock);
+ if(*tag == nil) {
+ t = malloc(sizeof (*t));
+ if(t != nil) {
+ memset(&t->l, 0, sizeof(t->l));
+ t->key = nextkey++;
+ *tag = t;
}
- } else
- fde->wc++;
- _spinunlock(&fd_table_lock);
-
- /* handle recursion */
- id = proc()->osprocid;
- if(id == fde->id) {
- fde->ref++;
- return 0;
}
+ _spinunlock(&tag_lock);
+}
- qlock(&fde->lock);
- fde->id = id;
- return 0;
+void
+_thread_tag_lock(void **tag)
+{
+ struct thread_tag *t;
+
+ if(*tag == nil)
+ _thread_tag_init(tag);
+ t = *tag;
+ _spinlock(&t->l);
}
void
-_thread_fd_unlock(int fd, int type)
+_thread_tag_unlock(void **tag)
{
- struct fd_entry *fde;
- int id;
+ struct thread_tag *t;
- fde = _thread_fd_lookup(fd);
- if(fde == nil) {
- fprint(2, "_thread_fd_unlock: fd %d not in table!\n", fd);
- return;
- }
+ if(*tag == nil)
+ _thread_tag_init(tag);
+ t = *tag;
+ _spinunlock(&t->l);
+}
- if(type == FD_READ && --fde->rc >= 1)
- return;
- else
- fde->wc--;
+static void *
+_thread_tag_insert(struct thread_tag *t, void *v)
+{
+ t->data = v;
+ t->next = thread_tag_store;
+ thread_tag_store = t;
+ return t;
+}
- id = proc()->osprocid;
- if(id == fde->id && fde->ref > 0) {
- fde->ref--;
- return;
+static void *
+_thread_tag_lookup(struct thread_tag *tag, int size)
+{
+ struct thread_tag *t;
+ void *p;
+
+ _spinlock(&tag->l);
+ for(t = thread_tag_store; t != nil; t = t->next)
+ if(t->key == tag->key)
+ break;
+ if(t == nil) {
+ p = malloc(size);
+ if(p == nil) {
+ _spinunlock(&tag->l);
+ return nil;
+ }
+ _thread_tag_insert(tag, p);
}
- fde->id = 0;
- qunlock(&fde->lock);
+ _spinunlock(&tag->l);
+ return tag->data;
}
-void
-_thread_dl_lock(int t)
+void *
+_thread_tag_storage(void **tag, void *storage, size_t n, void *err)
{
- if(t)
- _spinunlock(&dl_lock);
+ struct thread_tag *t;
+ void *r;
+
+ if(*tag == nil)
+ _thread_tag_init(tag);
+ t = *tag;
+
+ r = _thread_tag_lookup(t, n);
+ if(r == nil)
+ r = err;
else
- _spinlock(&dl_lock);
+ memcpy(r, storage, n);
+ return r;
}
void