
memory leak in DMA buffers allocation?


From: Da Zheng
Subject: memory leak in DMA buffers allocation?
Date: Sun, 07 Feb 2010 22:08:27 +0800
User-agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.7) Gecko/20100111 Thunderbird/3.0.1

Hi,

I wrote an RPC in GNU Mach to allocate contiguous physical memory for DMA
buffers; the code is shown below. I have been using this function for quite a
long time, but it seems to cause some kind of memory leak. If I run the pcnet32
driver, which uses this function to allocate its DMA buffers, many times, the
system appears to run out of memory. I checked the code but couldn't find
anything that would leak memory: once a process terminates, the memory and the
related objects allocated by vm_dma_buff_alloc should be freed automatically. I
don't have much experience with Mach development; maybe that's why I can't find
the problem.
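
For context, here is roughly how the driver side ends up calling it. This is
only a sketch: I assume the MIG-generated stub keeps the name and argument
order of the kernel routine, and "host_priv_port" stands for the privileged
host port the driver already holds.

        vm_address_t vaddr, paddr;
        kern_return_t err;

        /* Ask the kernel for a wired, physically contiguous buffer
         * mapped into this task.  */
        err = vm_dma_buff_alloc (host_priv_port, mach_task_self (),
                                 buf_size, &vaddr, &paddr);
        if (err != KERN_SUCCESS)
                return err;
        /* vaddr is the task-local mapping; paddr is what gets
         * programmed into the card for DMA.  */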

By the way, feel free to point out any other problems in this function. antrik
said this implementation isn't consistent with Mach's memory management and
that I should make it work like device_map, so that there is a pager in the
kernel. I basically agree with that. What should the interface look like? Any
suggestions? (A rough sketch of what I have in mind follows the patch below.)

Best regards,
Zheng Da

+void vm_pages_release(npages, pages, external)
+       int                     npages;
+       vm_page_t               *pages;
+       boolean_t               external;
+{
+       int i;
+
+       for (i = 0; i < npages; i++)
+       {
+               vm_page_release (pages[i], external);
+       }
+}
+
+kern_return_t vm_dma_buff_alloc(host_priv, map, size, result_vaddr, result_paddr)
+       host_t                  host_priv;
+       vm_map_t                map;
+       vm_size_t               size;
+       vm_address_t            *result_vaddr;
+       vm_address_t            *result_paddr;
+{
+       extern vm_size_t        vm_page_big_pagenum;
+       extern vm_offset_t      phys_first_addr;
+       extern vm_offset_t      phys_last_addr;
+
+       int                     npages;
+       int                     i;
+       vm_page_t               *pages;
+       vm_object_t             object;
+       vm_map_entry_t          entry;
+       kern_return_t           kr;
+       vm_address_t            vaddr;
+       vm_offset_t             offset = 0;
+
+       if (host_priv == HOST_NULL)
+               return KERN_INVALID_HOST;
+
+       if (map == VM_MAP_NULL)
+               return KERN_INVALID_TASK;
+
+       size = round_page(size);
+
+       /* We allocate the contiguous physical pages for the buffer. */
+
+       npages = size / PAGE_SIZE;
+       pages = (vm_page_t *) kalloc (npages * sizeof (vm_page_t));
+       if (pages == NULL)
+       {
+               return KERN_RESOURCE_SHORTAGE;
+       }
+       
+       if (vm_page_big_pagenum == 0)
+               vm_page_big_pagenum = atop(phys_last_addr - phys_first_addr);
+
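+       /* Grab npages physically contiguous pages off the free list.  */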
+       kr = vm_page_grab_contiguous_pages(npages, pages, NULL, TRUE);
+       if (kr)
+       {
+               kfree ((vm_offset_t) pages, npages * sizeof (vm_page_t));
+               return kr;
+       }
+
+       /* Allocate the object and find a virtual address range
+        * for the DMA buffer.  */
+
+       object = vm_object_allocate(size);
+       vm_map_lock(map);
+       /* TODO user_wired_count might need to be set as 1 */
+       kr = vm_map_find_entry(map, &vaddr, size, (vm_offset_t) 0,
+                              VM_OBJECT_NULL, &entry);
+       if (kr != KERN_SUCCESS)
+       {
+               vm_map_unlock(map);
+               vm_object_deallocate(object);
+               /* Release the grabbed pages before freeing the array
+                * that still points at them.  */
+               vm_pages_release (npages, pages, TRUE);
+               kfree ((vm_offset_t) pages, npages * sizeof (vm_page_t));
+               return kr;
+       }
+       
+       entry->object.vm_object = object;
+       entry->offset = 0;
+
+       /* We can unlock map now.  */
+       vm_map_unlock(map);
+
+       /* We have the physical pages we need; now do the mapping.
+        * Mark the range not pageable so the wired mappings stay put.  */
+
+       pmap_pageable (map->pmap, vaddr, vaddr + size, FALSE);
+
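+       /* The grabbed pages are physically contiguous, so
+        * pages[0]->phys_addr is the physical base of the whole buffer.  */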
+       *result_vaddr = vaddr;
+       *result_paddr = pages[0]->phys_addr;
+
+       for (i = 0; i < npages; i++)
+       {
+               vm_object_lock(object);
+               vm_page_lock_queues();
+               vm_page_insert(pages[i], object, offset);
+               vm_page_wire(pages[i]);
+               vm_page_unlock_queues();
+               vm_object_unlock(object);
+
+               /* Enter it in the target map's pmap.  */
+               PMAP_ENTER(map->pmap, vaddr, pages[i], VM_PROT_DEFAULT, TRUE);
+
+               vm_object_lock(object);
+               PAGE_WAKEUP_DONE(pages[i]);
+               vm_object_unlock(object);
+
+               vaddr += PAGE_SIZE;
+               offset += PAGE_SIZE;
+       }
+
+       kfree ((vm_offset_t) pages, npages * sizeof (vm_page_t));
+       return KERN_SUCCESS;
+}
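
To make the device_map suggestion concrete, here is the kind of interface I am
imagining. All names below are made up and the pager itself is not shown; the
point is only that the RPC would hand back a memory object backed by an
in-kernel pager over the contiguous pages, instead of entering the pages into
the caller's map directly.

        /* Hypothetical interface sketch.  The returned port is a memory
         * object; the caller maps it with vm_map() like any other memory
         * object, and the in-kernel pager keeps the backing pages wired
         * and contiguous.  */
        kern_return_t vm_dma_buff_create(host_priv, size, pager, result_paddr)
                host_t                  host_priv;
                vm_size_t               size;
                ipc_port_t              *pager;         /* out: memory object */
                vm_address_t            *result_paddr;  /* out: physical base */

With something like this, mapping and unmapping go through the normal
vm_map()/vm_deallocate() path, and the pages could be released when the last
reference to the memory object goes away, which would hopefully avoid the kind
of leak I am seeing here.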



