summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Schlemmer <azarah@gentoo.org>2002-12-08 20:16:28 +0000
committerMartin Schlemmer <azarah@gentoo.org>2002-12-08 20:16:28 +0000
commitd470bb5bde54af5061b83cdcffaab21fe29993dc (patch)
treef788ecbad3ad9eccf94ac7130be03c791959151b /media-video/nvidia-kernel
parentnew package (diff)
downloadgentoo-2-d470bb5bde54af5061b83cdcffaab21fe29993dc.tar.gz
gentoo-2-d470bb5bde54af5061b83cdcffaab21fe29993dc.tar.bz2
gentoo-2-d470bb5bde54af5061b83cdcffaab21fe29993dc.zip
update kernel 2.5 patches
Diffstat (limited to 'media-video/nvidia-kernel')
-rw-r--r--media-video/nvidia-kernel/ChangeLog7
-rw-r--r--media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl-pa.diff379
-rw-r--r--media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl.diff949
-rw-r--r--media-video/nvidia-kernel/files/digest-nvidia-kernel-1.0.3123-r21
-rw-r--r--media-video/nvidia-kernel/nvidia-kernel-1.0.3123-r2.ebuild101
5 files changed, 1436 insertions, 1 deletions
diff --git a/media-video/nvidia-kernel/ChangeLog b/media-video/nvidia-kernel/ChangeLog
index 3e4f90cd1663..0d4ab6804cf0 100644
--- a/media-video/nvidia-kernel/ChangeLog
+++ b/media-video/nvidia-kernel/ChangeLog
@@ -1,6 +1,11 @@
# ChangeLog for media-video/nvidia-kernel
# Copyright 2002 Gentoo Technologies, Inc.; Distributed under the GPL
-# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/ChangeLog,v 1.10 2002/11/03 09:38:13 azarah Exp $
+# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/ChangeLog,v 1.11 2002/12/08 20:16:28 azarah Exp $
+
+*nvidia-kernel-1.0.3123-r2 (8 Dec 2002)
+
+ 8 Dec 2002; Martin Schlemmer <azarah@gentoo.org>:
+ Update kernel 2.5 patches, closing bugs #11191 and #11701.
*nvidia-kernel-1.0.3123-r1 (3 Nov 2002)
diff --git a/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl-pa.diff b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl-pa.diff
new file mode 100644
index 000000000000..4c6ab5407e7e
--- /dev/null
+++ b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl-pa.diff
@@ -0,0 +1,379 @@
+diff -ru NVIDIA_kernel-1.0-3123-2.5/nv-linux.h NVIDIA_kernel-1.0-3123-2.5-pa/nv-linux.h
+--- NVIDIA_kernel-1.0-3123-2.5/nv-linux.h Sat Oct 26 07:38:02 2002
++++ NVIDIA_kernel-1.0-3123-2.5-pa/nv-linux.h Sat Oct 26 07:07:06 2002
+@@ -61,7 +61,7 @@
+
+ #include <linux/pci.h> /* pci_find_class, etc */
+ #include <linux/wrapper.h> /* mem_map_reserve */
+-#include <linux/interrupt.h> /* tasklets, interrupt helpers */
++#include <linux/interrupt.h> /* early kernels: bh; newer: irq */
+
+ #include <asm/system.h> /* cli, sli, save_flags */
+ #include <asm/io.h> /* ioremap, virt_to_phys */
+@@ -122,6 +122,7 @@
+ # define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&mem_map[map_nr].count))
+ # define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&mem_map[map_nr].count))
+ # define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&mem_map[map_nr].count))
++# define VMA_PRIVATE(vma) ((void*)((vma)->vm_pte))
+ #else
+ # define LINUX_VMA_OFFS(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
+ # define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
+@@ -130,6 +131,7 @@
+ # define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&(map_nr)->count))
+ # define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&(map_nr)->count))
+ # define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&(map_nr)->count))
++# define VMA_PRIVATE(vma) ((vma)->vm_private_data)
+ #endif
+
+ #ifdef KERNEL_2_5
+@@ -202,6 +204,7 @@
+ typedef struct nv_alloc_s {
+ struct nv_alloc_s *next;
+ struct vm_area_struct *vma;
++ unsigned int usage_count;
+ unsigned int process_id;
+ unsigned int thread_gid;
+ unsigned int num_pages;
+diff -ru NVIDIA_kernel-1.0-3123-2.5/nv.c NVIDIA_kernel-1.0-3123-2.5-pa/nv.c
+--- NVIDIA_kernel-1.0-3123-2.5/nv.c Sat Oct 26 07:20:31 2002
++++ NVIDIA_kernel-1.0-3123-2.5-pa/nv.c Sat Oct 26 07:04:46 2002
+@@ -74,11 +74,21 @@
+ static int nvos_is_nv_device(struct pci_dev *dev);
+ static int nvos_set_primary_card(nv_ioctl_primary_card_t *info);
+ static int nvos_probe_devices(void);
+-static void * nvos_malloc(unsigned long);
+-static void nvos_free(void **);
+-
+ static void nvos_proc_create(void);
+ static void nvos_proc_remove(void);
++static void * nvos_malloc_pages(unsigned long);
++static void nvos_unlock_pages(void **);
++static void nvos_free_pages(void **);
++
++#define nvos_unlock_and_free_pages(count, page_list) \
++ if (page_list) { \
++ if (count == 0) \
++ nvos_unlock_pages(page_list); \
++ nvos_free_pages(page_list); \
++ }
++
++static nv_alloc_t *nvos_create_alloc();
++static int nvos_free_alloc(nv_alloc_t *);
+
+ /* nvl_ functions.. take a linux state device pointer */
+ static nv_alloc_t *nvl_find_alloc(nv_linux_state_t *, unsigned long, nv_alloc_t **);
+@@ -364,18 +374,39 @@
+ * memory on systems with high memory support enabled.
+ */
+
+-static void *nvos_malloc(unsigned long size)
++/* note that there's a subtle kernel interaction with regards to bookkeeping
++ * on these pages. So long as the pages are marked reserved, the kernel won't
++ * touch them (alter the usage count on them). this leads to a subtle problem
++ * with mmap. Normally, allocating the pages would set the count to 1, then
++ * mmaping them would bump the count up to 2. The order of unmapping and freeing
++ * the pages wouldn't matter, as they wouldn't really be considered free by the
++ * kernel until the count dropped back to 0. Since the kernel won't touch the
++ * count when the page is reserved, we need to be careful about this order and
++ * unreserving the pages. if we unreserve the pages while freeing them, and the
++ * munmap comes later, the munmap code path will attempt a second free on the
++ * same pages. We also don't have a lot of control over which comes first,
++ * sometimes we'll get called to free the pages first, sometimes we'll get called
++ * to munmap them first. Oh, and we'll get vma open/close calls every time the
++ * process is cloned, then execv'd, and munmap == vma close.
++ * sooo, we keep our own count of the allocation usage, and don't unreserve the
++ * pages until our count drops to 0. this should currently happen in either
++ * vma_release or nvos_free, both of which will be followed by a kernel attempt
++ * to free the page. Since the page fill finally be unreserved, the kernel will
++ * reduce the count to 0 and successfully free the page for us, only once.
++ * sigh... you have to love s&%*^y interfaces that force you to *know* too much
++ * about kernel internals.
++ */
++
++static void *nvos_malloc_pages(unsigned long pages_needed)
+ {
+ unsigned long *page_list = NULL;
+ unsigned long *page_ptr = NULL;
+- unsigned int pages_needed;
+ unsigned int page_list_size;
+
+ /*
+ * allocate a pointer for each physical page and an
+ * integer to hold the number of pages allocated
+ */
+- pages_needed = (size >> PAGE_SHIFT);
+ page_list_size = (pages_needed + 1) * sizeof(unsigned long *);
+
+ page_list = vmalloc(page_list_size);
+@@ -424,11 +455,15 @@
+ return NULL;
+ }
+
+-static void nvos_free(void **page_list)
++// unlock the pages we've locked down for dma purposes
++static void nvos_unlock_pages(void **page_list)
+ {
+ unsigned long *page_ptr;
+ unsigned int pages_left;
+
++ if (page_list == NULL)
++ return;
++
+ page_ptr = (unsigned long *) page_list;
+
+ /* retrieve the number of pages allocated */
+@@ -436,11 +471,71 @@
+
+ while (pages_left) {
+ mem_map_unreserve(GET_MAP_NR(*page_ptr));
++ page_ptr++;
++ pages_left--;
++ }
++}
++
++static void nvos_free_pages(void **page_list)
++{
++ unsigned long *page_ptr;
++ unsigned int pages_left;
++
++ if (page_list == NULL)
++ return;
++
++ page_ptr = (unsigned long *) page_list;
++
++ /* retrieve the number of pages allocated */
++ pages_left = *(unsigned int *) (page_list - 1);
++
++ while (pages_left) {
+ free_page((unsigned long) phys_to_virt(*page_ptr++));
+ pages_left--;
+ }
++}
+
+- vfree(page_list);
++static
++nv_alloc_t *nvos_create_alloc(void)
++{
++ nv_alloc_t *at;
++
++ NV_KMALLOC(at, sizeof(nv_alloc_t));
++ if (at == NULL)
++ return NULL;
++
++ memset(at, 0, sizeof(nv_alloc_t));
++
++ at->process_id = current->pid;
++#if !defined (KERNEL_2_2)
++ at->thread_gid = current->tgid;
++#else
++ at->thread_gid = -1;
++#endif
++
++ return at;
++}
++
++static
++int nvos_free_alloc(
++ nv_alloc_t *at
++)
++{
++ if (at == NULL)
++ return -1;
++
++ if (at->usage_count)
++ return 1;
++
++ // we keep the page_table around after freeing the pages
++ // for bookkeeping reasons. Free the page_table and assume
++ // the underlying pages are already unlocked and freed.
++ if (at->page_table)
++ vfree(at->page_table - 1);
++
++ NV_KFREE(at);
++
++ return 0;
+ }
+
+ static u8 nvos_find_agp_capability(struct pci_dev *dev)
+@@ -981,6 +1076,12 @@
+ void
+ nv_kern_vma_open(struct vm_area_struct *vma)
+ {
++ if (VMA_PRIVATE(vma))
++ {
++ nv_alloc_t *at = (nv_alloc_t *) VMA_PRIVATE(vma);
++ at->usage_count++;
++ }
++
+ MOD_INC_USE_COUNT;
+ }
+
+@@ -988,6 +1089,25 @@
+ void
+ nv_kern_vma_release(struct vm_area_struct *vma)
+ {
++ if (VMA_PRIVATE(vma))
++ {
++ nv_alloc_t *at = (nv_alloc_t *) VMA_PRIVATE(vma);
++
++ at->usage_count--;
++
++ // if usage_count is down to 0, the kernel virtual mapping was freed
++ // but the underlying physical pages were not, due to the reserved bit
++ // being set. We need to clear the reserved bit, then munmap will
++ // zap the pages and free the physical pages.
++ if (at->usage_count == 0)
++ {
++ if (at->page_table)
++ nvos_unlock_pages(at->page_table);
++ nvos_free_alloc(at);
++ VMA_PRIVATE(vma) = NULL;
++ }
++ }
++
+ MOD_DEC_USE_COUNT;
+ }
+
+@@ -1125,6 +1245,7 @@
+ nvl->tl.data = (unsigned long) nv->pdev;
+ tasklet_enable(&nvl->tl);
+
++ memset(&nvl->wq, 0, sizeof(wait_queue_head_t));
+ nv->flags |= NV_FLAG_OPEN;
+ }
+
+@@ -1310,6 +1431,8 @@
+ }
+
+ at->vma = vma;
++ VMA_PRIVATE(vma) = at;
++ at->usage_count++;
+
+ start = vma->vm_start;
+ while (pages--)
+@@ -1344,6 +1467,8 @@
+ }
+
+ at->vma = vma;
++ VMA_PRIVATE(vma) = at;
++ at->usage_count++;
+
+ if (NV_OSAGP_ENABLED(nv))
+ {
+@@ -2163,20 +2288,14 @@
+ int rm_status = 0;
+ nv_linux_state_t *nvl = (nv_linux_state_t *) nv;
+
+- NV_KMALLOC(at, sizeof(nv_alloc_t));
++ at = nvos_create_alloc();
+ if (at == NULL)
+ return RM_ERROR;
+
+- memset(at, 0, sizeof(nv_alloc_t));
+-
+ page_count = RM_PAGES_TO_OS_PAGES(page_count);
+ at->num_pages = page_count;
+-
+- at->process_id = current->pid;
+- at->thread_gid = current->tgid;
+-
+ at->class = class;
+- at->vma = NULL;
++ at->usage_count++;
+
+ if (at->class == NV01_ROOT)
+ {
+@@ -2222,7 +2341,7 @@
+ NV_ADD_AT(nvl, at);
+ } else {
+ /* use nvidia's nvagp support */
+- at->page_table = nvos_malloc(page_count << PAGE_SHIFT);
++ at->page_table = nvos_malloc_pages(page_count);
+ if (at->page_table == NULL)
+ goto failed;
+
+@@ -2246,7 +2365,7 @@
+ nv->agp_buffers++;
+ } else {
+ /* allocate general system memory */
+- at->page_table = nvos_malloc(page_count << PAGE_SHIFT);
++ at->page_table = nvos_malloc_pages(page_count);
+ if (at->page_table == NULL)
+ goto failed;
+
+@@ -2259,10 +2378,10 @@
+ failed:
+ /* free any pages we may have allocated */
+ if (at->page_table)
+- nvos_free(at->page_table);
++ nvos_unlock_and_free_pages(at->usage_count, at->page_table);
++
++ nvos_free_alloc(at);
+
+- /* free it */
+- NV_KFREE(at);
+ return -1;
+ }
+
+@@ -2300,17 +2419,19 @@
+ NV_REMOVE_AT_FROM_LIST(nvl, at, prev);
+ nv_unlock_at(nv);
+
++ at->usage_count--;
++
+ if (NV_OSAGP_ENABLED(nv))
+ {
+ rmStatus = KernFreeAGPPages(pAddress, priv_data);
+ } else {
+ rmStatus = rm_free_agp_pages(nv, pAddress, priv_data);
+- if (rmStatus == 0x0)
+- nvos_free(at->page_table);
++ if (rmStatus == RM_OK)
++ nvos_unlock_and_free_pages(at->usage_count, at->page_table);
+ }
+
+ /* we may hold off on disabling agp until all buffers are freed */
+- if (rmStatus == 0x0)
++ if (rmStatus == RM_OK)
+ {
+ nv->agp_buffers--;
+ if (!nv->agp_buffers && nv->agp_teardown)
+@@ -2325,6 +2446,8 @@
+ NV_REMOVE_AT_FROM_LIST(nvl, at, prev);
+ nv_unlock_at(nv);
+
++ at->usage_count--;
++
+ if (at->class == NV01_ROOT)
+ {
+ int order, i;
+@@ -2342,11 +2465,13 @@
+ }
+ else
+ {
+- nvos_free(at->page_table);
++ nvos_unlock_and_free_pages(at->usage_count, at->page_table);
+ }
+ }
+
+- NV_KFREE(at);
++ if (at->usage_count == 0)
++ nvos_free_alloc(at);
++
+ return rmStatus;
+ }
+
+diff -ru NVIDIA_kernel-1.0-3123-2.5/nv.h NVIDIA_kernel-1.0-3123-2.5-pa/nv.h
+--- NVIDIA_kernel-1.0-3123-2.5/nv.h Sat Oct 26 07:21:10 2002
++++ NVIDIA_kernel-1.0-3123-2.5-pa/nv.h Thu Oct 17 05:30:51 2002
+@@ -195,8 +195,10 @@
+ U032 agp_buffers;
+ U032 agp_teardown;
+
++#ifndef KERNEL_2_5
+ /* keep track of any pending bottom-halves */
+ int bh_count;
++#endif
+
+ /* copy of the video bios in system memory */
+ /* used by general resman code to query bios-set values */
diff --git a/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl.diff b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl.diff
new file mode 100644
index 000000000000..268be2f59c6c
--- /dev/null
+++ b/media-video/nvidia-kernel/files/NVIDIA_kernel-1.0-3123-2.5-tl.diff
@@ -0,0 +1,949 @@
+diff -ru NVIDIA_kernel-1.0-3123/Makefile NVIDIA_kernel-1.0-3123-2.5/Makefile
+--- NVIDIA_kernel-1.0-3123/Makefile Tue Aug 27 16:36:53 2002
++++ NVIDIA_kernel-1.0-3123-2.5/Makefile Sun Oct 20 07:40:08 2002
+@@ -8,7 +8,7 @@
+ OBJECTS=nv.o os-interface.o os-registry.o
+ HEADERS=os-interface.h nv-linux.h nv-misc.h nv.h nv-ids.h rmretval.h nvtypes.h nv_ref.h $(VERSION_HDR)
+
+-CFLAGS=-Wall -Wimplicit -Wreturn-type -Wswitch -Wformat -Wchar-subscripts -Wparentheses -Wpointer-arith -Wcast-qual -Wno-multichar -O -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual
++CFLAGS=-Wall -Wimplicit -Wreturn-type -Wswitch -Wformat -Wchar-subscripts -Wparentheses -Wno-pointer-arith -Wcast-qual -Wno-multichar -O -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual
+
+ RESMAN_KERNEL_MODULE=Module-nvkernel
+
+@@ -58,8 +58,10 @@
+ # allow specification of alternate include file tree on command line and extra defines
+ ifdef SYSINCLUDE
+ INCLUDES += -I$(SYSINCLUDE)
++INCLUDES += -I$(SYSINCLUDE)/../arch/i386/mach-generic
+ else
+ INCLUDES += -I$(KERNINC)
++INCLUDES += -I$(KERNINC)/../arch/i386/mach-generic
+ endif
+
+ DEFINES+=$(EXTRA_DEFINES)
+diff -ru NVIDIA_kernel-1.0-3123/nv-linux.h NVIDIA_kernel-1.0-3123-2.5/nv-linux.h
+--- NVIDIA_kernel-1.0-3123/nv-linux.h Tue Aug 27 16:36:53 2002
++++ NVIDIA_kernel-1.0-3123-2.5/nv-linux.h Sat Oct 26 07:38:02 2002
+@@ -28,19 +28,14 @@
+ #include <linux/module.h>
+ #include <linux/version.h>
+
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 12)
+-# error This driver does not support 2.2.11 or earlier kernels!
+-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
+-# define KERNEL_2_2
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)
++# error This driver does not support 2.2.x kernels!
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
+-# error This driver does not support 2.3.x development kernels!
++# error This driver does not support 2.3.x kernels!
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+ # define KERNEL_2_4
+ #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+-# error This driver does not support 2.5.x development kernels!
+ # define KERNEL_2_5
+-#else
+-# error This driver does not support 2.6.x or newer kernels!
+ #endif
+
+ #if defined (CONFIG_SMP) && !defined (__SMP__)
+@@ -51,7 +46,6 @@
+ #include <linux/errno.h> /* error codes */
+ #include <linux/stddef.h> /* NULL, offsetof */
+ #include <linux/wait.h> /* wait queues */
+-#include <linux/tqueue.h> /* struct tq_struct */
+
+ #include <linux/slab.h> /* kmalloc, kfree, etc */
+ #include <linux/vmalloc.h> /* vmalloc, vfree, etc */
+@@ -59,9 +53,15 @@
+ #include <linux/poll.h> /* poll_wait */
+ #include <linux/delay.h> /* mdelay, udelay */
+
++#ifdef KERNEL_2_5
++#include <linux/sched.h> /* suser(), capable() replacement */
++#include <linux/smp_lock.h> /* kernel_locked */
++#include <asm/kmap_types.h> /* page table entry lookup */
++#endif
++
+ #include <linux/pci.h> /* pci_find_class, etc */
+ #include <linux/wrapper.h> /* mem_map_reserve */
+-#include <linux/interrupt.h> /* mark_bh, init_bh, remove_bh */
++#include <linux/interrupt.h> /* tasklets, interrupt helpers */
+
+ #include <asm/system.h> /* cli, sli, save_flags */
+ #include <asm/io.h> /* ioremap, virt_to_phys */
+@@ -69,14 +69,9 @@
+ #include <asm/page.h> /* PAGE_OFFSET */
+ #include <asm/pgtable.h> /* pte bit definitions */
+
+-#if !defined (KERNEL_2_2)
+ #include <linux/spinlock.h>
+ #include <asm/semaphore.h>
+ #include <linux/highmem.h>
+-#else
+-#include <asm/spinlock.h>
+-#include <asm/semaphore.h>
+-#endif
+
+ #ifdef CONFIG_PROC_FS
+ #include <linux/proc_fs.h>
+@@ -118,16 +113,7 @@
+ #endif
+ #endif
+
+-#if !defined (KERNEL_2_2)
+-# define LINUX_VMA_OFFS(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
+-# define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
+-# define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)
+-# define GET_MAP_NR(phys_page) virt_to_page(__va(phys_page))
+-# define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&(map_nr)->count))
+-# define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&(map_nr)->count))
+-# define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&(map_nr)->count))
+-# define GET_EVENT_QUEUE(nv) ((struct __wait_queue_head *) ((nv)->event_queue))
+-#else
++#ifdef KERNEL_2_2
+ # define in_irq() (local_irq_count[smp_processor_id()])
+ # define LINUX_VMA_OFFS(vma) ((vma)->vm_offset)
+ # define GET_MODULE_SYMBOL(mod, sym) (void*) get_module_symbol((mod), (sym))
+@@ -136,7 +122,50 @@
+ # define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&mem_map[map_nr].count))
+ # define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&mem_map[map_nr].count))
+ # define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&mem_map[map_nr].count))
+-# define GET_EVENT_QUEUE(nv) ((struct wait_queue **) &((nv)->event_queue))
++#else
++# define LINUX_VMA_OFFS(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
++# define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
++# define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)
++# define GET_MAP_NR(phys_page) virt_to_page(__va(phys_page))
++# define MEM_MAP_READ_COUNT(map_nr) (atomic_read(&(map_nr)->count))
++# define MEM_MAP_INC_COUNT(map_nr) (atomic_inc(&(map_nr)->count))
++# define MEM_MAP_DEC_COUNT(map_nr) (atomic_dec(&(map_nr)->count))
++#endif
++
++#ifdef KERNEL_2_5
++# define NV_DEVICE_NUMBER(_minor) ((kdev_val(_minor)) & 0x0f)
++# define NV_IS_CONTROL_DEVICE(_minor) (((kdev_val(_minor)) & 0xff) == 0xff)
++# define SUSER() capable(CAP_SYS_ADMIN)
++# define REMAP_PAGE_RANGE(a...) remap_page_range(vma, ## a)
++# define CLI() local_irq_disable()
++# define SAVE_FLAGS(x) local_save_flags(x)
++# define RESTORE_FLAGS(x) local_irq_restore(x)
++# define MAY_SLEEP() (!in_interrupt() && !in_atomic())
++#else
++# define NV_DEVICE_NUMBER(_minor) ((_minor) & 0x0f)
++# define NV_IS_CONTROL_DEVICE(_minor) (((_minor) & 0xff) == 0xff)
++# define SUSER() suser()
++# define REMAP_PAGE_RANGE(a...) remap_page_range(## a)
++# define CLI() cli()
++# define SAVE_FLAGS(x) save_flags(x)
++# define RESTORE_FLAGS(x) restore_flags(x)
++# define MAY_SLEEP() (!in_interrupt())
++#endif
++
++#ifdef KERNEL_2_5
++#define PTE_OFFSET(pmd, address, pte) (pte = *pte_offset_map(pmd, address))
++#else
++#ifdef pte_offset_atomic
++#define PTE_OFFSET(pmd, address, pte) \
++ { \
++ pte_t *pPTE; \
++ pPTE = pte_offset_atomic(pmd, address); \
++ pte = *pPTE; \
++ pte_kunmap(pPTE); \
++ }
++#else
++#define PTE_OFFSET(pmd, address, pte) (pte = *pte_offset(pmd, address))
++#endif
+ #endif
+
+ #define NV_PAGE_ALIGN(addr) ( ((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
+@@ -193,14 +222,11 @@
+
+ nv_alloc_t *alloc_queue;
+
+- // bottom half interrupt handler info; per device
+- struct tq_struct *bh;
+-
+ U032 vblank_notifier;
+ U032 waiting_for_vblank;
+
+- /* queue for for NV's OS events */
+- void *event_queue;
++ struct tasklet_struct tl;
++ wait_queue_head_t wq;
+
+ /* per-device locking mechanism for access to core rm */
+ spinlock_t rm_lock;
+diff -ru NVIDIA_kernel-1.0-3123/nv.c NVIDIA_kernel-1.0-3123-2.5/nv.c
+--- NVIDIA_kernel-1.0-3123/nv.c Tue Aug 27 16:36:52 2002
++++ NVIDIA_kernel-1.0-3123-2.5/nv.c Sat Oct 26 07:20:31 2002
+@@ -36,7 +36,6 @@
+ // keep track of opened clients and their process id so they
+ // can be free'd up on abnormal close
+ nv_client_t nv_clients[NV_MAX_CLIENTS];
+-struct tq_struct nv_bottom_halves[NV_MAX_CLIENTS];
+
+
+ #ifdef CONFIG_DEVFS_FS
+@@ -45,16 +44,6 @@
+ #endif
+
+ /*
+- * pick apart our minor device number
+- * low 3 bits is NV device
+- * if 255, then its the control device
+- */
+-
+-#define NV_DEVICE_NUMBER(_minor) ((_minor) & 0x0f)
+-#define NV_DEVICE_IS_CONTROL_DEVICE(_minor) \
+- (((_minor) & 0xFF) == 0xFF)
+-
+-/*
+ * Hardware access macros for the kernel driver only
+ * The resource manager part of the driver uses different values here
+ */
+@@ -537,12 +526,7 @@
+ nv_linux_state_t *nvl;
+ nv_linux_state_t *nv_max_devices;
+
+-#if defined (KERNEL_2_2)
+- proc[DRIVER] = create_proc_entry("driver", flags, &proc_root);
+-#else
+ proc[DRIVER] = proc_root_driver;
+-#endif
+-
+ proc[NVIDIA] = create_proc_entry("nvidia", flags, proc[DRIVER]);
+ proc[AGP] = create_proc_entry("agp", flags, proc[NVIDIA]);
+ proc[CARDS] = create_proc_entry("cards", flags, proc[NVIDIA]);
+@@ -595,14 +579,9 @@
+ static void nvos_proc_remove(void)
+ {
+ #ifdef CONFIG_PROC_FS
+-#if defined (KERNEL_2_2)
+- remove_proc_entry("driver", &proc_root);
+- remove_proc_entry("nv", &proc_root);
+-#else
+ remove_proc_entry("nvidia", proc_root_driver);
+ remove_proc_entry("nv", &proc_root);
+ #endif
+-#endif
+ }
+
+ /*
+@@ -834,9 +813,8 @@
+
+ int init_module(void)
+ {
+- nv_linux_state_t *nvl;
+ int rc;
+- int num_devices;
++ int num_devices, i;
+
+ memset(nv_linux_devices, 0, sizeof(nv_linux_devices));
+ num_devices = nvos_probe_devices();
+@@ -848,7 +826,7 @@
+
+ printk("nvidia: loading %s\n", pNVRM_ID);
+
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ rc = devfs_register_chrdev(nv_major, "nvidia", &nv_fops);
+ #else
+ rc = register_chrdev(nv_major, "nvidia", &nv_fops);
+@@ -863,7 +841,6 @@
+ memset(nv_dev_handle, 0, sizeof(devfs_handle_t) * NV_MAX_DEVICES);
+ do {
+ char name[10];
+- int i;
+
+ nv_ctl_handle = devfs_register(NULL, "nvidiactl",
+ DEVFS_FL_DEFAULT, nv_major, 255,
+@@ -882,21 +859,27 @@
+
+ NV_DMSG((nv_state_t *) 0, "init_module: major number %d", nv_major);
+
+- // init all the bottom half structures
+- for (nvl = nv_linux_devices; nvl < nv_linux_devices + NV_MAX_DEVICES; nvl++)
+- {
+- nvl->bh = &nv_bottom_halves[nvl - nv_linux_devices];
+- nvl->bh->routine = rm_isr_bh;
+- nvl->bh->data = (void *) 0;
+- nvl->bh->sync = 0;
++ for (i = 0; i < NV_MAX_DEVICES; i++) {
++ /*
++ * We keep one tasklet per card to avoid latency issues with more
++ * than one device, no two instances of a single tasklet are ever
++ * executed concurrently.
++ */
++ atomic_set(&nv_linux_devices[i].tl.count, 1);
++
++ /*
++ * Initialize the event queue for this device. This only needs to
++ * happen once for every device.
++ */
++ init_waitqueue_head(&nv_linux_devices[i].wq);
+ }
+
+ // init the control device
+ {
+ nv_state_t *nv_ctl = NV_STATE_PTR(&nv_ctl_device);
+- nv_ctl_device.event_queue = NULL;
+ nv_ctl->os_state = &nv_ctl_device;
+ nv_lock_init_locks(nv_ctl);
++ init_waitqueue_head(&nv_ctl_device.wq);
+ }
+
+ #ifdef CONFIG_PM
+@@ -918,7 +901,7 @@
+ return 0;
+
+ failed:
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ devfs_unregister_chrdev(nv_major, "nvidia");
+ #else
+ unregister_chrdev(nv_major, "nvidia");
+@@ -965,7 +948,7 @@
+ }
+ }
+
+-#ifdef CONFIG_DEVFS_FS
++#if defined (CONFIG_DEVFS_FS) && !defined (KERNEL_2_5)
+ rc = devfs_unregister_chrdev(nv_major, "nvidia");
+ #else
+ rc = unregister_chrdev(nv_major, "nvidia");
+@@ -1068,7 +1051,7 @@
+
+ /* for control device, just jump to its open routine */
+ /* after setting up the private data */
+- if (NV_DEVICE_IS_CONTROL_DEVICE(inode->i_rdev))
++ if (NV_IS_CONTROL_DEVICE(inode->i_rdev))
+ return nv_kern_ctl_open(inode, file);
+
+ /* what device are we talking about? */
+@@ -1134,16 +1117,13 @@
+ goto failed;
+ }
+
+-#if !defined (KERNEL_2_2)
+- NV_KMALLOC(nvl->event_queue, sizeof(struct __wait_queue_head));
+- if (nvl->event_queue == NULL)
+- goto failed;
+- memset(nvl->event_queue, 0, sizeof(struct __wait_queue_head));
+-
+- init_waitqueue_head(GET_EVENT_QUEUE(nvl));
+-#else
+- nvl->event_queue = NULL;
+-#endif
++ /*
++ * Finalize the tasklet initialization started in init_module and
++ * enable bottom-half processing.
++ */
++ nvl->tl.func = rm_isr_bh;
++ nvl->tl.data = (unsigned long) nv->pdev;
++ tasklet_enable(&nvl->tl);
+
+ nv->flags |= NV_FLAG_OPEN;
+ }
+@@ -1178,7 +1158,7 @@
+
+ /* for control device, just jump to its open routine */
+ /* after setting up the private data */
+- if (NV_DEVICE_IS_CONTROL_DEVICE(inode->i_rdev))
++ if (NV_IS_CONTROL_DEVICE(inode->i_rdev))
+ return nv_kern_ctl_close(inode, file);
+
+ NV_DMSG(nv, "close");
+@@ -1188,33 +1168,22 @@
+ nv_lock_ldata(nv);
+ if (--nv->usage_count == 0)
+ {
+- int counter = 0;
+-
+- /* turn off interrupts.
+- ** be careful to make sure any pending bottom half gets run
+- ** or disabled before calling rm_shutdown_adapter() since
+- ** it will free up the pdev. This is hard to see on single
+- ** cpu systems, but easy on dual cpu :-)
+- */
+- // nv_interrupts_disable(nv);
++ /*
++ * The usage count for this device has dropped to zero, it can be
++ * safely shut down; the first step is to disable interrupts.
++ */
+ rm_disable_adapter(nv);
+
+- /* give it a moment to allow any bottom half to run */
+-
+-#define MAX_BH_TASKS 10
+- while ((nv->bh_count) && (counter < MAX_BH_TASKS))
+- {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule_timeout(HZ/50);
+- counter++;
+- }
++ /*
++ * Disable this device's tasklet to make sure that no bottom-half
++ * will run with an undefined device state.
++ */
++ tasklet_disable(&nvl->tl);
+
+ /* free the irq, which may block until any pending interrupts */
+ /* are done being processed. */
+ free_irq(nv->interrupt_line, (void *) nv);
+
+- nvl->bh->data = (void *) 0;
+-
+ rm_shutdown_adapter(nv);
+
+ (void) nv_unmap_device(nv);
+@@ -1234,12 +1203,6 @@
+ }
+ }
+
+-#if !defined (KERNEL_2_2)
+- /* this only needs to be freed on 2.4 and later kernels */
+- NV_KFREE(nvl->event_queue);
+- nvl->event_queue = NULL;
+-#endif
+-
+ /* leave INIT flag alone so we don't reinit every time */
+ nv->flags &= ~(NV_FLAG_OPEN | NV_FLAG_WAITING);
+ }
+@@ -1299,7 +1262,8 @@
+ #if defined(NVCPU_IA64)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ #endif
+- if (remap_page_range(vma->vm_start,
++
++ if (REMAP_PAGE_RANGE(vma->vm_start,
+ (u32)(nv->regs.address) + LINUX_VMA_OFFS(vma) - NV_MMAP_REG_OFFSET,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+@@ -1316,7 +1280,8 @@
+ #if defined(NVCPU_IA64)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ #endif
+- if (remap_page_range(vma->vm_start,
++
++ if (REMAP_PAGE_RANGE(vma->vm_start,
+ (u32)(nv->fb.address) + LINUX_VMA_OFFS(vma) - NV_MMAP_FB_OFFSET,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+@@ -1350,8 +1315,10 @@
+ while (pages--)
+ {
+ page = (unsigned long) at->page_table[i++];
+- if (remap_page_range(start, page, PAGE_SIZE, PAGE_SHARED))
++
++ if (REMAP_PAGE_RANGE(start, page, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
++
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ }
+@@ -1388,7 +1355,7 @@
+ }
+ else
+ {
+- rm_map_agp_pages(nv, (void **) &vma->vm_start,
++ rm_map_agp_pages(nv, (void **) &vma,
+ at->class, at->priv_data);
+
+ /* assumes we're not supporting IA64 AGP ourselves */
+@@ -1431,7 +1398,7 @@
+ return nv_kern_ctl_poll (file, wait);
+
+ // add us to the list
+- poll_wait(file, GET_EVENT_QUEUE(nvl), wait);
++ poll_wait(file, &nvl->wq, wait);
+
+ nv_lock_ldata(nv);
+
+@@ -1625,10 +1592,10 @@
+ if (need_to_run_bottom_half)
+ {
+ nv_lock_bh(nv);
++
+ nv->bh_count++;
+- nvl->bh->data = nv->pdev;
+- queue_task(nvl->bh, &tq_immediate);
+- mark_bh(IMMEDIATE_BH);
++ tasklet_schedule(&nvl->tl);
++
+ nv_unlock_bh(nv);
+ }
+ }
+@@ -1706,24 +1673,6 @@
+
+ NV_DMSG(nv, "open");
+
+- /* if this is the first time the control device has been opened,
+- * allocate the wait queue
+- */
+-
+- if (! nvl->event_queue) {
+-
+-#if !defined (KERNEL_2_2)
+- NV_KMALLOC(nvl->event_queue, sizeof(struct __wait_queue_head));
+- if (nvl->event_queue == NULL)
+- return -ENOMEM;
+- memset(nvl->event_queue, 0, sizeof(struct __wait_queue_head));
+-
+- init_waitqueue_head(GET_EVENT_QUEUE(nvl));
+-#else
+- nvl->event_queue = NULL;
+-#endif
+- }
+-
+ nv->flags |= NV_FLAG_OPEN + NV_FLAG_CONTROL;
+
+ /* turn off the hotkey occurred bit */
+@@ -1750,14 +1699,11 @@
+ NV_DMSG(nv, "close");
+
+ nv_lock_ldata(nv);
+- if (--nv->usage_count == 0)
+- {
+-#if !defined (KERNEL_2_2)
+- nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+- /* this only needs to be freed on 2.4 and later kernels */
+- NV_KFREE(nvl->event_queue);
+- nvl->event_queue = 0;
+-#endif
++ if (--nv->usage_count == 0) {
++ /*
++ * The control device has been released; with no physical devices
++ * backing it, we only need to reset the flags;
++ */
+ nv->flags = 0;
+ }
+ nv_unlock_ldata(nv);
+@@ -1795,7 +1741,7 @@
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+- poll_wait(file, GET_EVENT_QUEUE(nvl), wait);
++ poll_wait(file, &nvl->wq, wait);
+
+ nv_lock_ldata(nv);
+
+@@ -1825,7 +1771,7 @@
+ nv_ctl_device.nv_state.flags |= NV_FLAG_HOTKEY_OCCURRED;
+ nv_unlock_ldata(&(nv_ctl_device.nv_state));
+
+- wake_up_interruptible(GET_EVENT_QUEUE(&nv_ctl_device));
++ wake_up_interruptible(&nv_ctl_device.wq);
+ }
+
+ struct host_bridge_t {
+@@ -2145,7 +2091,7 @@
+ {
+ pgd_t *pg_dir;
+ pmd_t *pg_mid_dir;
+- pte_t *pte__, pte;
++ pte_t pte;
+
+ /* XXX do we really need this? */
+ if (address > VMALLOC_START)
+@@ -2173,14 +2119,7 @@
+ if (pmd_none(*pg_mid_dir))
+ goto failed;
+
+-#if defined (pte_offset_atomic)
+- pte__ = pte_offset_atomic(pg_mid_dir, address);
+- pte = *pte__;
+- pte_kunmap(pte__);
+-#else
+- pte__ = NULL;
+- pte = *pte_offset(pg_mid_dir, address);
+-#endif
++ PTE_OFFSET(pg_mid_dir, address, pte);
+
+ if (!pte_present(pte))
+ goto failed;
+@@ -2234,11 +2173,7 @@
+ at->num_pages = page_count;
+
+ at->process_id = current->pid;
+-#if !defined (KERNEL_2_2)
+ at->thread_gid = current->tgid;
+-#else
+- at->thread_gid = -1;
+-#endif
+
+ at->class = class;
+ at->vma = NULL;
+@@ -2762,7 +2697,7 @@
+
+ nvfp->any_fired_notifiers++;
+
+- wake_up_interruptible(GET_EVENT_QUEUE(nvl));
++ wake_up_interruptible(&nvl->wq);
+ }
+
+ /*
+@@ -2779,7 +2714,7 @@
+ if (nvl->waiting_for_vblank)
+ nvl->vblank_notifier++;
+
+- wake_up_interruptible(GET_EVENT_QUEUE(nvl));
++ wake_up_interruptible(&nvl->wq);
+ }
+
+
+@@ -2827,12 +2762,8 @@
+ if ( (NV_AGP_DISABLED(nv)) && (config & NVOS_AGP_CONFIG_NVAGP) )
+ {
+ /* make sure the user does not have agpgart loaded */
+-#if !defined (KERNEL_2_2)
+ if (inter_module_get("drm_agp")) {
+ inter_module_put("drm_agp");
+-#else
+- if (GET_MODULE_SYMBOL(0, __MODULE_STRING(agp_enable))) {
+-#endif
+ printk("NVRM: not using NVAGP, AGPGART is loaded!!\n");
+ } else
+ status = rm_init_agp(nv);
+diff -ru NVIDIA_kernel-1.0-3123/os-interface.c NVIDIA_kernel-1.0-3123-2.5/os-interface.c
+--- NVIDIA_kernel-1.0-3123/os-interface.c Tue Aug 27 16:36:52 2002
++++ NVIDIA_kernel-1.0-3123-2.5/os-interface.c Sat Oct 26 07:49:14 2002
+@@ -27,7 +27,7 @@
+
+ BOOL os_is_administrator(PHWINFO pDev)
+ {
+- return suser();
++ return SUSER();
+ }
+
+ U032 os_get_page_size(VOID)
+@@ -169,6 +169,11 @@
+ U032 size
+ )
+ {
++ /*
++ * XXX This needs to be !MAY_SLEEP() rather than in_interrupt(); this
++ * requires that quite a bit of locking be rearranged, however, which
++ * is why I'll leave this alone for now.
++ */
+ if (in_interrupt()) {
+ if (size <= KMALLOC_LIMIT) {
+ /*
+@@ -347,7 +352,7 @@
+ if (in_irq() && MilliSeconds > NV_MAX_ISR_MDELAY)
+ return RM_ERROR;
+
+- if (in_interrupt())
++ if (!MAY_SLEEP())
+ {
+ mdelay(MilliSeconds);
+ return RM_OK;
+@@ -937,14 +942,14 @@
+
+ ULONG os_cli(ULONG flags)
+ {
+- save_flags(flags);
+- cli();
++ SAVE_FLAGS(flags);
++ CLI();
+ return flags;
+ }
+
+ ULONG os_sti(ULONG flags)
+ {
+- restore_flags(flags);
++ RESTORE_FLAGS(flags);
+ return flags;
+ }
+
+@@ -1064,27 +1069,10 @@
+ {
+ void *vaddr;
+
+- if (in_interrupt())
+- {
+- nv_printf("trying to map 0x%x to kernel space in interrupt!\n", start);
+- os_dbg_breakpoint();
+- return NULL;
+- }
+-
+- {
+-#if defined (KERNEL_2_2)
+- unsigned long map_nr = MAP_NR(__va(start));
+- if (map_nr < max_mapnr) {
+- vaddr = __va(start);
+- } else
+-#endif
+- {
+- if (mode == NV_MEMORY_DEFAULT) {
+- vaddr = ioremap(start, size_bytes);
+- } else {
+- vaddr = ioremap_nocache(start, size_bytes);
+- }
+- }
++ if (mode == NV_MEMORY_DEFAULT) {
++ vaddr = ioremap(start, size_bytes);
++ } else {
++ vaddr = ioremap_nocache(start, size_bytes);
+ }
+
+ #ifdef DEBUG
+@@ -1106,16 +1094,7 @@
+ U032 size_bytes
+ )
+ {
+-#if defined (KERNEL_2_2)
+- if (MAP_NR(addr) < max_mapnr) {
+- // if we didn't want the memory cached, this isn't necessary
+- // but we shouldn't be in a timing critical piece of code.
+- asm volatile("wbinvd":::"memory");
+- } else
+-#endif
+- {
+- iounmap(addr);
+- }
++ iounmap(addr);
+ }
+
+ VOID* os_map_user_space(
+@@ -1125,31 +1104,7 @@
+ U032 mode
+ )
+ {
+- int err;
+- void *paddr = NULL, *uaddr = NULL;
+-
+- if ( (size_t) kaddr > VMALLOC_START) {
+- nv_ioctl_memory_vtop_t parms;
+- parms.buffer = (NvP64)(NV_UINTPTR_T) kaddr;
+- parms.byte_length = 1;
+- parms.physical_addresses = (NvP64)(NV_UINTPTR_T) &paddr;
+- nv_vtop(&parms, 0);
+- } else {
+- paddr = (void *) virt_to_phys(kaddr);
+- }
+-
+- uaddr = *priv;
+-
+- /* finally, let's do it! */
+- err = remap_page_range( (size_t) uaddr, (size_t) paddr, size_bytes,
+- PAGE_SHARED);
+-
+- if (err != 0)
+- {
+- return (void *) NULL;
+- }
+-
+- return uaddr;
++ return NULL;
+ }
+
+ VOID os_unmap_user_space(
+@@ -1157,7 +1112,7 @@
+ VOID *priv
+ )
+ {
+- // I don't think I need to do anything here...
++ return;
+ }
+
+ VOID* os_map_io_space(
+@@ -1168,24 +1123,18 @@
+ U032 mode
+ )
+ {
+- int err;
+- void *uaddr = NULL;
++ struct vm_area_struct *vma;
+
+- if (!user)
++ if (user == 0 || priv == NULL || *priv == NULL)
+ return NULL;
+
+- uaddr = *priv;
++ vma = (struct vm_area_struct *) *priv;
+
+- /* finally, let's do it! */
+- err = remap_page_range( (size_t) uaddr, (size_t) start, size_bytes,
+- PAGE_SHARED);
+-
+- if (err != 0)
+- {
+- return (void *) NULL;
+- }
++ if (REMAP_PAGE_RANGE(vma->vm_start,
++ start & PAGE_MASK, size_bytes, PAGE_SHARED))
++ return NULL;
+
+- return uaddr;
++ return (void *)(NV_UINTPTR_T) vma->vm_start;
+ }
+
+ VOID os_unmap_io_space(
+@@ -1240,7 +1189,11 @@
+ typedef struct {
+ int (*backend_acquire)(void);
+ void (*backend_release)(void);
++#ifdef KERNEL_2_5
++ int (*copy_info)(agp_kern_info *);
++#else
+ void (*copy_info)(agp_kern_info *);
++#endif
+ agp_memory * (*allocate_memory)(size_t, unsigned int);
+ void (*free_memory)(agp_memory *);
+ int (*bind_memory)(agp_memory *, off_t);
+@@ -1251,39 +1204,16 @@
+ agp_operations_struct agp_ops;
+ agp_kern_info agpinfo;
+ agp_gart gart;
+-#if !defined (KERNEL_2_2)
+ const drm_agp_t *drm_agp_p;
+-#endif
+-
+-#if defined (KERNEL_2_2)
+- #define GET_AGPGART_SYMBOL(sym, sym_string) \
+- sym = (void*) GET_MODULE_SYMBOL(0, sym_string); \
+- if (sym == NULL) \
+- { \
+- printk("NVRM: AGPGART: unable to retrieve symbol %s\n", sym_string); \
+- return 1; \
+- }
+-
+- #define AGP_BACKEND_ACQUIRE_SYM __MODULE_STRING(agp_backend_acquire)
+- #define AGP_BACKEND_RELEASE_SYM __MODULE_STRING(agp_backend_release)
+- #define AGP_COPY_INFO_SYM __MODULE_STRING(agp_copy_info)
+- #define AGP_ALLOCATE_MEMORY_SYM __MODULE_STRING(agp_allocate_memory)
+- #define AGP_FREE_MEMORY_SYM __MODULE_STRING(agp_free_memory)
+- #define AGP_BIND_MEMORY_SYM __MODULE_STRING(agp_bind_memory)
+- #define AGP_UNBIND_MEMORY_SYM __MODULE_STRING(agp_unbind_memory)
+- #define AGP_ENABLE_SYM __MODULE_STRING(agp_enable)
+-#endif
+
+ BOOL KernInitAGP(VOID **ap_phys_base, VOID **ap_mapped_base, U032 *apsize)
+ {
+ U032 agp_rate;
+ U032 agp_sba;
+ U032 agp_fw;
+- char* chipset;
+
+ memset( (void *) &gart, 0, sizeof(agp_gart));
+
+-#if !defined (KERNEL_2_2)
+ if (!(drm_agp_p = inter_module_get_request("drm_agp", "agpgart"))) {
+ printk(KERN_ERR "NVRM: AGPGART: unable to retrieve symbol table\n");
+ return 1;
+@@ -1298,24 +1228,6 @@
+ agp_ops.unbind_memory = drm_agp_p->unbind_memory;
+ agp_ops.enable = drm_agp_p->enable;
+
+-#else
+- #if defined(CONFIG_KMOD)
+- if ( request_module("agpgart") ) {
+- printk(KERN_INFO "NVRM: AGPGART: not loading agpgart.o\n");
+- return 1;
+- }
+- #endif
+-
+- GET_AGPGART_SYMBOL(agp_ops.backend_acquire, AGP_BACKEND_ACQUIRE_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.backend_release, AGP_BACKEND_RELEASE_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.copy_info, AGP_COPY_INFO_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.allocate_memory, AGP_ALLOCATE_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.free_memory, AGP_FREE_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.bind_memory, AGP_BIND_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.unbind_memory, AGP_UNBIND_MEMORY_SYM);
+- GET_AGPGART_SYMBOL(agp_ops.enable, AGP_ENABLE_SYM);
+-#endif
+-
+ /* NOTE: from here down, return an error code of '-1'
+ * that indicates that agpgart is loaded, but we failed to use it
+ * in some way. This is so we don't try to use nvagp and lock up
+@@ -1338,44 +1250,15 @@
+ if (os_read_registry_dword(NULL, "NVreg", "EnableAGPFW", &agp_fw) == RM_ERROR)
+ agp_fw = 1;
+ agp_fw &= 0x00000001;
+-
+- (*(agp_ops.copy_info))(&agpinfo);
+
+- switch ( agpinfo.chipset ) {
+- case INTEL_GENERIC: chipset = "Intel"; break;
+- case INTEL_LX: chipset = "Intel 440LX"; break;
+- case INTEL_BX: chipset = "Intel 440BX"; break;
+- case INTEL_GX: chipset = "Intel 440GX"; break;
+- case INTEL_I810: chipset = "Intel i810"; break;
+- case INTEL_I840: chipset = "Intel i840"; break;
+-#if !defined (KERNEL_2_2)
+- case INTEL_I815: chipset = "Intel i815"; break;
+-#if !defined(__rh_config_h__)
+- case INTEL_I850: chipset = "Intel i850"; break;
+-#endif
+-#endif
+-#if defined(NVCPU_IA64)
+- case INTEL_460GX: chipset = "Intel 460GX"; break;
+-#endif
+- case VIA_GENERIC: chipset = "VIA"; break;
+- case VIA_VP3: chipset = "VIA VP3"; break;
+- case VIA_MVP3: chipset = "VIA MVP3"; break;
+- case VIA_MVP4: chipset = "VIA MVP4"; break;
+-#if !defined (KERNEL_2_2)
+- case VIA_APOLLO_KX133: chipset = "VIA Apollo KX133"; break;
+- case VIA_APOLLO_KT133: chipset = "VIA Apollo KT133"; break;
+-#endif
+- case VIA_APOLLO_PRO: chipset = "VIA Apollo Pro"; break;
+- case SIS_GENERIC: chipset = "SiS"; break;
+- case AMD_GENERIC: chipset = "AMD"; break;
+- case AMD_IRONGATE: chipset = "AMD Irongate"; break;
+- case ALI_M1541: chipset = "ALi M1541"; break;
+- case ALI_GENERIC: chipset = "ALi"; break;
+- case NOT_SUPPORTED: chipset = "unsupported"; break;
+- default: chipset = "unknown";
++#ifdef KERNEL_2_5
++ if (agp_ops.copy_info(&agpinfo) != 0) {
++ printk("nvidia: chipset not supported by agpgart.o\n");
++ agp_ops.backend_release();
+ }
+-
+- printk(KERN_INFO "NVRM: AGPGART: %s chipset\n", chipset);
++#else
++ (*(agp_ops.copy_info))(&agpinfo);
++#endif
+
+ #ifdef CONFIG_MTRR
+ if ((gart.mtrr = mtrr_add(agpinfo.aper_base,
+@@ -1452,10 +1335,7 @@
+ }
+
+ (*(agp_ops.backend_release))();
+-
+-#if !defined (KERNEL_2_2)
+ inter_module_put("drm_agp");
+-#endif
+
+ printk(KERN_INFO "NVRM: AGPGART: backend released\n");
+ return 0;
+@@ -1593,13 +1473,9 @@
+
+ agp_addr = agpinfo.aper_base + (agp_data->offset << PAGE_SHIFT);
+
+- err = remap_page_range(vma->vm_start, (size_t) agp_addr,
++ err = REMAP_PAGE_RANGE(vma->vm_start, (size_t) agp_addr,
+ agp_data->num_pages << PAGE_SHIFT,
+-#if defined(NVCPU_IA64)
+ vma->vm_page_prot);
+-#else
+- PAGE_SHARED);
+-#endif
+
+ if (err) {
+ printk(KERN_ERR "NVRM: AGPGART: unable to remap %lu pages\n",
+@@ -1782,11 +1658,9 @@
+ if (sgi_funcs.add_barrier == NULL)
+ {
+ #if defined(TESTING_SWAP)
+-#if !defined (KERNEL_2_2)
+ inter_module_register(ADD_BARRIER_FUNC, THIS_MODULE, sgitest_add_barrier);
+ inter_module_register(REMOVE_BARRIER_FUNC, THIS_MODULE, sgitest_remove_barrier);
+ inter_module_register(SWAP_READY_FUNC, THIS_MODULE, sgitest_swap_ready);
+-#endif
+ #endif
+ sgi_funcs.add_barrier = GET_MODULE_SYMBOL(0, ADD_BARRIER_FUNC);
+ sgi_funcs.remove_barrier = GET_MODULE_SYMBOL(0, REMOVE_BARRIER_FUNC);
diff --git a/media-video/nvidia-kernel/files/digest-nvidia-kernel-1.0.3123-r2 b/media-video/nvidia-kernel/files/digest-nvidia-kernel-1.0.3123-r2
new file mode 100644
index 000000000000..b183624bce27
--- /dev/null
+++ b/media-video/nvidia-kernel/files/digest-nvidia-kernel-1.0.3123-r2
@@ -0,0 +1 @@
+MD5 9496c1b260985eaea59d3760b1e42eb4 NVIDIA_kernel-1.0-3123.tar.gz 428232
diff --git a/media-video/nvidia-kernel/nvidia-kernel-1.0.3123-r2.ebuild b/media-video/nvidia-kernel/nvidia-kernel-1.0.3123-r2.ebuild
new file mode 100644
index 000000000000..90eeb4729c31
--- /dev/null
+++ b/media-video/nvidia-kernel/nvidia-kernel-1.0.3123-r2.ebuild
@@ -0,0 +1,101 @@
+# Copyright 1999-2002 Gentoo Technologies, Inc.
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/media-video/nvidia-kernel/nvidia-kernel-1.0.3123-r2.ebuild,v 1.1 2002/12/08 20:16:28 azarah Exp $
+
+inherit eutils
+
+# Make sure Portage does _NOT_ strip symbols. Need both lines for
+# Portage 1.8.9+
+DEBUG="yes"
+RESTRICT="nostrip"
+
+NV_V="${PV/1.0./1.0-}"
+NV_PACKAGE="NVIDIA_kernel-${NV_V}"
+S="${WORKDIR}/${NV_PACKAGE}"
+DESCRIPTION="Linux kernel module for the NVIDIA's X driver"
+SRC_URI="http://download.nvidia.com/XFree86_40/${NV_V}/${NV_PACKAGE}.tar.gz"
+HOMEPAGE="http://www.nvidia.com/"
+
+# The slot needs to be set to $KV to prevent unmerges of
+# modules for other kernels.
+LICENSE="NVIDIA"
+SLOT="${KV}"
+KEYWORDS="x86 -ppc -sparc -sparc64"
+
+DEPEND="virtual/linux-sources
+ >=sys-apps/portage-1.9.10"
+
+
+src_unpack() {
+ unpack ${A}
+
+ # Next section applies patches for linux-2.5 kernel, or if
+ # linux-2.4, the page_alloc.c patch courtesy of NVIDIA Corporation.
+ # All these are from:
+ #
+ # http://www.minion.de/nvidia/
+ #
+ # Many thanks to Christian Zander <zander@minion.de> for bringing
+ # these to us, and being so helpful to select which to use.
+ # This should close bug #9704.
+
+ local KV_major="`uname -r | cut -d. -f1`"
+ local KV_minor="`uname -r | cut -d. -f2`"
+
+ cd ${S}
+ if [ "${KV_major}" -eq 2 -a "${KV_minor}" -eq 5 ]
+ then
+ EPATCH_SINGLE_MSG="Applying tasklet patch for kernel 2.5..." \
+ epatch ${FILESDIR}/${NV_PACKAGE}-2.5-tl.diff
+ EPATCH_SINGLE_MSG="Applying page_alloc.c patch..." \
+ epatch ${FILESDIR}/${NV_PACKAGE}-2.5-tl-pa.diff
+ else
+ EPATCH_SINGLE_MSG="Applying page_alloc.c patch..." \
+ epatch ${FILESDIR}/${NV_PACKAGE}-pa.diff
+ fi
+}
+
+src_compile() {
+ # Portage should determine the version of the kernel sources
+ check_KV
+ #IGNORE_CC_MISMATCH disables a sanity check that's needed when gcc has been
+ #updated but the running kernel is still compiled with an older gcc. This is
+ #needed for chrooted building, where the sanity check detects the gcc of the
+ #kernel outside the chroot rather than within.
+ make IGNORE_CC_MISMATCH="yes" KERNDIR="/usr/src/linux" \
+ clean NVdriver || die
+}
+
+src_install() {
+ # The driver goes into the standard modules location
+ insinto /lib/modules/${KV}/video
+ doins NVdriver
+
+ # Add the aliases
+ insinto /etc/modules.d
+ doins ${FILESDIR}/nvidia
+
+ # Docs
+ dodoc ${S}/README
+
+ # The device creation script
+ into /
+ newsbin ${S}/makedevices.sh NVmakedevices.sh
+}
+
+pkg_postinst() {
+ if [ "${ROOT}" = "/" ]
+ then
+ # Update module dependency
+ [ -x /usr/sbin/update-modules ] && /usr/sbin/update-modules
+ if [ ! -e /dev/.devfsd ] && [ -x /sbin/NVmakedevices.sh ]
+ then
+ /sbin/NVmakedevices.sh >/dev/null 2>&1
+ fi
+ fi
+
+ einfo "If you are not using devfs, loading the module automatically at"
+ einfo "boot up, you need to add \"NVdriver\" to your /etc/modules.autoload."
+ einfo
+}
+