Category: LINUX

2009-03-22 14:53:19

export symbols for lguest as a module

lguest does some fairly low-level things to support being a host, which normal modules don't need:

math_state_restore:
When the guest triggers a Device Not Available fault, we need to be able to restore the FPU.

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();				/* Allow maths ops (or we recurse) */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
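For context only, here is a minimal sketch (not lguest's actual code) of how a hypervisor module might use this export when the guest takes a Device Not Available (#NM) trap. The handler name is hypothetical, and I am assuming math_state_restore() is declared via <asm/i387.h> on i386 kernels of this era:

#include <linux/preempt.h>
#include <asm/i387.h>	/* assumed to declare math_state_restore() on i386 */

/* Hypothetical reflection path for a guest's trap 7 (#NM). */
static void demo_handle_device_not_available(void)
{
	/*
	 * The comment above math_state_restore() requires kernel preemption
	 * to be disabled around the call; the function clears CR0.TS and
	 * reloads the current task's FPU state.
	 */
	preempt_disable();
	math_state_restore();
	preempt_enable();
}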


When a module is loaded, it is dynamically linked into the kernel. As with user-space shared libraries, a kernel function can only be called from a module if it has been exported. In the kernel, functions are exported with two declarations: EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL(). The set of exported symbols is regarded as the kernel's exported interface, sometimes simply called the kernel API. (See Linux Kernel Development, Second Edition, Chapter 16, Section 7 for details.)
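As a hedged illustration of the mechanism (not code from lguest), exporting a function from built-in kernel code and calling it from a module might look like this; my_buffer_flush() and demo_init() are made-up names:

/* In a built-in kernel source file: define and export the function. */
#include <linux/module.h>
#include <linux/init.h>

void my_buffer_flush(void)
{
	/* ... do the work ... */
}
EXPORT_SYMBOL_GPL(my_buffer_flush);	/* visible only to GPL-compatible modules */

/* In a separate module: declare it and call it; the module loader resolves
 * the reference against the kernel symbol table when the module is inserted. */
extern void my_buffer_flush(void);

static int __init demo_init(void)
{
	my_buffer_flush();
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");			/* required to link against _GPL exports */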



__put_task_struct:
We need to hold a reference to another task for inter-guest I/O, and put_task_struct() is an inline function which calls __put_task_struct().


void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
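A hedged sketch of the usual pattern a module follows: pin the task with get_task_struct() while using it, then drop the reference with put_task_struct(), whose inline body calls the exported __put_task_struct() once the usage count reaches zero. The surrounding function is illustrative only:

#include <linux/sched.h>

/* Keep `tsk` alive across a potentially blocking operation. */
static void demo_use_other_task(struct task_struct *tsk)
{
	get_task_struct(tsk);		/* bump tsk->usage so it cannot be freed */

	/* ... inter-guest I/O against tsk would happen here ... */

	put_task_struct(tsk);		/* inline; calls __put_task_struct() if
					 * this was the last reference */
}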

access_process_vm:
We need to access another task for inter-guest I/O.


/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
EXPORT_SYMBOL_GPL(access_process_vm);
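A hedged sketch of how a module holding such a task reference could use this export to read a few bytes out of the other task's address space; the function name and the idea of a "guest address" are illustrative:

#include <linux/sched.h>
#include <linux/mm.h>	/* assumed to declare access_process_vm() */

/* Copy `len` bytes at user address `addr` in task `tsk` into the kernel
 * buffer `buf`.  Returns the number of bytes actually transferred. */
static int demo_peek_guest_memory(struct task_struct *tsk, unsigned long addr,
				  void *buf, int len)
{
	return access_process_vm(tsk, addr, buf, len, 0);	/* 0 = read */
}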

map_vm_area & __get_vm_area:
We need to map the switcher shim (i.e. the monitor) at 0xFFC01000.


/*
 * each page frame allocated to the noncontiguous memory area is now associated
 * with a linear address included in the interval of contiguous linear addresses.
 */
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);
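A hedged sketch of how the two exports combine: reserve a vm_struct covering a fixed linear-address range with __get_vm_area(), then back it with already-allocated page frames via map_vm_area(). The SWITCHER_ADDR value mirrors the address quoted above, but the function and its error handling are illustrative, not lguest's actual code:

#include <linux/vmalloc.h>
#include <linux/mm.h>

#define SWITCHER_ADDR	0xFFC01000UL	/* fixed mapping address from the text */

static int demo_map_switcher(struct page **pages, unsigned int nr_pages)
{
	struct vm_struct *area;

	/*
	 * Reserve the linear-address range.  The end is one page larger than
	 * the requested size because __get_vm_area() adds a guard page.
	 */
	area = __get_vm_area(nr_pages * PAGE_SIZE, VM_ALLOC, SWITCHER_ADDR,
			     SWITCHER_ADDR + (nr_pages + 1) * PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	/*
	 * Install the page frames into the kernel page tables for that range.
	 * map_vm_area() advances the `pages` pointer as it maps each page.
	 */
	return map_vm_area(area, PAGE_KERNEL, &pages);
}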

