/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);

int running_on_xen;
EXPORT_SYMBOL(running_on_xen);

//XXX xen/ia64 copy_from_guest() is broken.
//    This is a temporary workaround until it is fixed.
//    Used by balloon.c and netfront.c.
//    get_xen_guest_handle() is defined only when __XEN_TOOLS__ is defined;
//    if the definition in arch-ia64.h is changed, this must be updated.
#define get_xen_guest_handle(val, hnd)	do { val = (hnd).p; } while (0)
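
/*
 * Illustrative expansion (no extra functionality): a guest handle here
 * simply wraps a kernel pointer in its .p member, so
 *
 *	unsigned long *frame_list;
 *	get_xen_guest_handle(frame_list, reservation->extent_start);
 *
 * is equivalent to:
 *
 *	frame_list = reservation->extent_start.p;
 */
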
int
ia64_xenmem_reservation_op(unsigned long op,
			   struct xen_memory_reservation* reservation__)
{
	struct xen_memory_reservation reservation = *reservation__;
	unsigned long* frame_list;
	unsigned long nr_extents = reservation__->nr_extents;
	int ret = 0;

	get_xen_guest_handle(frame_list, reservation__->extent_start);

	BUG_ON(op != XENMEM_increase_reservation &&
	       op != XENMEM_decrease_reservation &&
	       op != XENMEM_populate_physmap);

	while (nr_extents > 0) {
		int tmp_ret;
		volatile unsigned long dummy;

		set_xen_guest_handle(reservation.extent_start, frame_list);
		reservation.nr_extents = nr_extents;

		dummy = frame_list[0];// re-install tlb entry before hypercall
		tmp_ret = ____HYPERVISOR_memory_op(op, &reservation);
		if (tmp_ret < 0) {
			if (ret == 0) {
				ret = tmp_ret;
			}
			break;
		}
		if (tmp_ret == 0) {
			//XXX dirty workaround for skbuff_ctor()
			//    of a non-privileged domain
			if ((op == XENMEM_increase_reservation ||
			     op == XENMEM_populate_physmap) &&
			    !is_initial_xendomain() &&
			    reservation.extent_order > 0)
				return ret + nr_extents;
			break;	/* no progress was made; don't spin forever */
		}
		frame_list += tmp_ret;
		nr_extents -= tmp_ret;
		ret += tmp_ret;
	}
	return ret;
}
EXPORT_SYMBOL(ia64_xenmem_reservation_op);
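
/*
 * Hedged usage sketch (not part of this file): a caller such as the
 * balloon driver builds a reservation describing `nr` frames and lets
 * this wrapper loop over partial completions.  Field names follow
 * struct xen_memory_reservation; frame_list is caller-provided.
 *
 *	struct xen_memory_reservation r = {
 *		.nr_extents   = nr,
 *		.extent_order = 0,
 *		.domid        = DOMID_SELF,
 *	};
 *	set_xen_guest_handle(r.extent_start, frame_list);
 *	ret = ia64_xenmem_reservation_op(XENMEM_populate_physmap, &r);
 *	// ret < 0: first error; otherwise number of extents completed.
 */
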
//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
//    move those to lib/contiguous_bitmap?
//XXX discontigmem/sparsemem

/*
 * Bitmap is indexed by page number. If bit is set, the page is part of a
 * xen_create_contiguous_region() area of memory.
 */
unsigned long *contiguous_bitmap;

void
contiguous_bitmap_init(unsigned long end_pfn)
{
	unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
	contiguous_bitmap = alloc_bootmem_low_pages(size);
	BUG_ON(!contiguous_bitmap);
	memset(contiguous_bitmap, 0, size);
}

#if 0
int
contiguous_bitmap_test(void* p)
{
	return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
}
#endif

static void contiguous_bitmap_set(
	unsigned long first_page, unsigned long nr_pages)
{
	unsigned long start_off, end_off, curr_idx, end_idx;

	curr_idx  = first_page / BITS_PER_LONG;
	start_off = first_page & (BITS_PER_LONG-1);
	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);

	if (curr_idx == end_idx) {
		contiguous_bitmap[curr_idx] |=
			((1UL<<end_off)-1) & -(1UL<<start_off);
	} else {
		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
		while ( ++curr_idx < end_idx )
			contiguous_bitmap[curr_idx] = ~0UL;
		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
	}
}
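
/*
 * Worked example of the mask arithmetic above (illustrative numbers):
 * with BITS_PER_LONG == 64, first_page = 3, nr_pages = 4, both indices
 * fall in word 0, so the single-word path applies:
 *
 *	-(1UL<<3)   = ...11111000   (bits >= 3)
 *	(1UL<<7)-1  = ...01111111   (bits <  7)
 *	AND         = ...01111000   (bits 3..6, i.e. pages 3,4,5,6)
 */
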
static void contiguous_bitmap_clear(
	unsigned long first_page, unsigned long nr_pages)
{
	unsigned long start_off, end_off, curr_idx, end_idx;

	curr_idx  = first_page / BITS_PER_LONG;
	start_off = first_page & (BITS_PER_LONG-1);
	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);

	if (curr_idx == end_idx) {
		contiguous_bitmap[curr_idx] &=
			-(1UL<<end_off) | ((1UL<<start_off)-1);
	} else {
		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
		while ( ++curr_idx != end_idx )
			contiguous_bitmap[curr_idx] = 0;
		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
	}
}
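
/*
 * The clear masks are the bitwise complements of the set masks above:
 * for the same first_page = 3, nr_pages = 4 example,
 * -(1UL<<7) | ((1UL<<3)-1) keeps every bit except bits 3..6.
 */
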
// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
// are based on i386 xen_create_contiguous_region(),
// xen_destroy_contiguous_region()

/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 7
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
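
/*
 * Overview of the exchange dance used below: XENMEM_exchange trades the
 * 2^order single-page extents listed in exchange.in (staged in
 * discontig_frames under balloon_lock) for one machine-contiguous
 * extent of the same total size described by exchange.out, keeping the
 * guest pseudo-physical range in place.  If the hypervisor lacks
 * XENMEM_exchange (-ENOSYS), the code falls back to a decrease/populate
 * pair, which is not atomic but preserves the reservation size.
 */
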
/* Ensure multi-page extents are contiguous in machine memory. */
int
__xen_create_contiguous_region(unsigned long vstart,
			       unsigned int order, unsigned int address_bits)
{
	unsigned long error = 0;
	unsigned long gphys = __pa(vstart);
	unsigned long start_gpfn = gphys >> PAGE_SHIFT;
	unsigned long num_gpfn = 1 << order;
	unsigned long i;
	unsigned long flags;

	unsigned long *in_frames = discontig_frames, out_frame;
	int success;
	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = num_gpfn,
			.extent_order = 0,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = 1,
			.extent_order = order,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		},
		.nr_exchanged = 0
	};

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	set_xen_guest_handle(exchange.in.extent_start, in_frames);
	set_xen_guest_handle(exchange.out.extent_start, &out_frame);

	scrub_pages(vstart, num_gpfn);

	balloon_lock(flags);

	/* Get a new contiguous memory extent. */
	for (i = 0; i < num_gpfn; i++) {
		in_frames[i] = start_gpfn + i;
	}
	out_frame = start_gpfn;
	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == num_gpfn);
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
	BUG_ON(success && (error != 0));
	if (unlikely(error == -ENOSYS)) {
		/* Compatibility when XENMEM_exchange is unsupported. */
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					     &exchange.in);
		BUG_ON(error != num_gpfn);
		error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
					     &exchange.out);
		if (error != 1) {
			/* Couldn't get special memory: fall back to normal. */
			for (i = 0; i < num_gpfn; i++) {
				in_frames[i] = start_gpfn + i;
			}
			error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
						     &exchange.in);
			BUG_ON(error != num_gpfn);
			success = 0;
		} else
			success = 1;
	}
	if (success)
		contiguous_bitmap_set(start_gpfn, num_gpfn);
#if 0
	if (success) {
		unsigned long mfn;
		unsigned long mfn_prev = ~0UL;
		for (i = 0; i < num_gpfn; i++) {
			mfn = pfn_to_mfn_for_dma(start_gpfn + i);
			if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
				xprintk("\n");
				xprintk("%s:%d order %d "
					"start 0x%lx bus 0x%lx "
					"machine 0x%lx\n",
					__func__, __LINE__, order,
					vstart, virt_to_bus((void*)vstart),
					phys_to_machine_for_dma(gphys));
				xprintk("mfn: ");
				for (i = 0; i < num_gpfn; i++) {
					mfn = pfn_to_mfn_for_dma(
						start_gpfn + i);
					xprintk("0x%lx ", mfn);
				}
				xprintk("\n");
				break;
			}
			mfn_prev = mfn;
		}
	}
#endif
	balloon_unlock(flags);
	return success? 0: -ENOMEM;
}
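
/*
 * Hedged usage sketch: the DMA mapping layer is the expected caller.
 * For example, obtaining a 4-page machine-contiguous buffer addressable
 * with 32 bits (this mirrors the i386 interface; a wrapper may differ):
 *
 *	unsigned long vstart = __get_free_pages(GFP_KERNEL, 2);
 *	if (__xen_create_contiguous_region(vstart, 2, 32) == 0) {
 *		... use the buffer for DMA ...
 *		__xen_destroy_contiguous_region(vstart, 2);
 *	}
 *	free_pages(vstart, 2);
 */
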
void
__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long flags;
	unsigned long error = 0;
	unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
	unsigned long num_gpfn = 1UL << order;
	unsigned long i;

	unsigned long *out_frames = discontig_frames, in_frame;
	int success;
	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = 1,
			.extent_order = order,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = num_gpfn,
			.extent_order = 0,
			.domid        = DOMID_SELF
		},
		.nr_exchanged = 0
	};

	if (!test_bit(start_gpfn, contiguous_bitmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
	set_xen_guest_handle(exchange.out.extent_start, out_frames);

	scrub_pages(vstart, num_gpfn);

	balloon_lock(flags);

	contiguous_bitmap_clear(start_gpfn, num_gpfn);

	/* Do the exchange for non-contiguous MFNs. */
	in_frame = start_gpfn;
	for (i = 0; i < num_gpfn; i++) {
		out_frames[i] = start_gpfn + i;
	}
	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == 1);
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
	BUG_ON(success && (error != 0));
	if (unlikely(error == -ENOSYS)) {
		/* Compatibility when XENMEM_exchange is unsupported. */
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					     &exchange.in);
		BUG_ON(error != 1);
		error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
					     &exchange.out);
		BUG_ON(error != num_gpfn);
	}
	balloon_unlock(flags);
}

///////////////////////////////////////////////////////////////////////////
// grant table hack
// cmd: GNTTABOP_xxx

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>

static void
gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
{
	uint32_t flags;

	flags = uop->flags;

	if (flags & GNTMAP_host_map) {
		if (flags & GNTMAP_application_map) {
			xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
			BUG();
		}
		if (flags & GNTMAP_contains_pte) {
			xprintd("GNTMAP_contains_pte is not supported yet: flags 0x%x\n", flags);
			BUG();
		}
	} else if (flags & GNTMAP_device_map) {
		xprintd("GNTMAP_device_map is not supported yet: 0x%x\n", flags);
		BUG();//XXX not yet. actually this flag is not used.
	} else {
		BUG();
	}
}

int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	__u64 va1, va2, pa1, pa2;

	if (cmd == GNTTABOP_map_grant_ref) {
		unsigned int i;
		for (i = 0; i < count; i++) {
			gnttab_map_grant_ref_pre(
				(struct gnttab_map_grant_ref*)uop + i);
		}
	}

	va1 = (__u64)uop & PAGE_MASK;
	pa1 = pa2 = 0;
	if ((REGION_NUMBER(va1) == 5) &&
	    ((va1 - KERNEL_START) >= KERNEL_TR_PAGE_SIZE)) {
		pa1 = ia64_tpa(va1);
		if (cmd <= GNTTABOP_transfer) {
			static uint32_t uop_size[GNTTABOP_transfer + 1] = {
				sizeof(struct gnttab_map_grant_ref),
				sizeof(struct gnttab_unmap_grant_ref),
				sizeof(struct gnttab_setup_table),
				sizeof(struct gnttab_dump_table),
				sizeof(struct gnttab_transfer),
			};
			va2 = (__u64)uop + (uop_size[cmd] * count) - 1;
			va2 &= PAGE_MASK;
			if (va1 != va2) {
				/* maximum size of uop is 2 pages */
				BUG_ON(va2 > va1 + PAGE_SIZE);
				pa2 = ia64_tpa(va2);
			}
		}
	}
	return ____HYPERVISOR_grant_table_op(cmd, uop, count, pa1, pa2);
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
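
/*
 * Note on the span check above: va1/va2 are the page-aligned addresses
 * of the first and last byte of the hypercall argument array.  When the
 * array straddles a page boundary, va2 != va1 and the physical address
 * of the second page is passed as well; the BUG_ON enforces the stated
 * invariant that the arguments never span more than two pages.  The
 * physical addresses are only needed for region-5 (vmalloc) addresses,
 * whose mappings may not be covered by the pinned kernel translation
 * register, so the hypervisor cannot rely on walking the guest TLB.
 */
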
///////////////////////////////////////////////////////////////////////////
// PageForeign(), SetPageForeign(), ClearPageForeign()

struct address_space xen_ia64_foreign_dummy_mapping;
EXPORT_SYMBOL(xen_ia64_foreign_dummy_mapping);

///////////////////////////////////////////////////////////////////////////
// foreign mapping
#include <linux/efi.h>
#include <asm/meminit.h>	// for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()

static unsigned long privcmd_resource_min = 0;
// Xen/ia64 currently can handle pseudo physical address bits up to
// (PAGE_SHIFT * 3)
static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;

static unsigned long
md_end_addr(const efi_memory_desc_t *md)
{
	return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
}

#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE	(1024 * 1024 * 1024UL)
static int
xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
{
	return (start < end &&
		(end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
}
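
/*
 * I.e. a candidate [start, end) range qualifies only if it is larger
 * than 1GB (XEN_IA64_PRIVCMD_LEAST_GAP_SIZE): e.g. start = 0x40000000,
 * end = 0xC0000000 gives 2GB and passes; a 512MB gap is rejected.
 */
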
static int __init
xen_ia64_privcmd_init(void)
{
	void *efi_map_start, *efi_map_end, *p;
	u64 efi_desc_size;
	efi_memory_desc_t *md;
	unsigned long tmp_min;
	unsigned long tmp_max;
	unsigned long gap_size;
	unsigned long prev_end;

	if (!is_running_on_xen())
		return -1;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	// First, check the highest address in use.
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		// nothing: just walk to the last descriptor
	}
	md = p - efi_desc_size;
	privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
	if (xen_ia64_privcmd_check_size(privcmd_resource_min,
					privcmd_resource_max)) {
		goto out;
	}

	// The highest address in use is too high. Try to find the largest gap.
	tmp_min = privcmd_resource_max;
	tmp_max = 0;
	gap_size = 0;
	prev_end = 0;
	for (p = efi_map_start;
	     p < efi_map_end - efi_desc_size;
	     p += efi_desc_size) {
		unsigned long end;
		efi_memory_desc_t* next;
		unsigned long next_start;

		md = p;
		end = md_end_addr(md);
		if (end > privcmd_resource_max) {
			break;
		}
		if (end < prev_end) {
			// Work around:
			// Xen may pass incompletely sorted memory
			// descriptors like
			//	[x, x + length]
			//	[x, x]
			// This order should be reversed.
			continue;
		}
		next = p + efi_desc_size;
		next_start = next->phys_addr;
		if (next_start > privcmd_resource_max) {
			next_start = privcmd_resource_max;
		}
		if (end < next_start && gap_size < (next_start - end)) {
			tmp_min = end;
			tmp_max = next_start;
			gap_size = tmp_max - tmp_min;
		}
		prev_end = end;
	}

	privcmd_resource_min = GRANULEROUNDUP(tmp_min);
	if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
		privcmd_resource_max = tmp_max;
		goto out;
	}

	privcmd_resource_min = tmp_min;
	privcmd_resource_max = tmp_max;
	if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
					 privcmd_resource_max)) {
		// No large enough gap was found.
		// Go ahead anyway with a warning, hoping that no large
		// region will be requested.
		printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
	}

out:
	printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
	       privcmd_resource_min, privcmd_resource_max,
	       (privcmd_resource_max - privcmd_resource_min) >> 20);
	BUG_ON(privcmd_resource_min >= privcmd_resource_max);
	return 0;
}
late_initcall(xen_ia64_privcmd_init);
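
/*
 * Illustrative walk-through (hypothetical EFI map): with descriptors
 * covering [0, 2GB) and [6GB, 8GB), the end of the last descriptor is
 * 8GB.  If [8GB, privcmd_resource_max) is more than 1GB wide it is used
 * directly; otherwise the scan above finds the interior gap [2GB, 6GB)
 * and privcmd mmaps take their pseudo-physical addresses from there.
 */
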
struct xen_ia64_privcmd_entry {
	atomic_t			map_count;
#define INVALID_GPFN	(~0UL)
	unsigned long			gpfn;
};

struct xen_ia64_privcmd_range {
	atomic_t			ref_count;
	unsigned long			pgoff; // in PAGE_SIZE
	struct resource*		res;

	unsigned long			num_entries;
	struct xen_ia64_privcmd_entry	entries[0];
};

struct xen_ia64_privcmd_vma {
	int				is_privcmd_mmapped;
	struct xen_ia64_privcmd_range*	range;

	unsigned long			num_entries;
	struct xen_ia64_privcmd_entry*	entries;
};
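
/*
 * Relationship sketch: one xen_ia64_privcmd_range covers the whole
 * reserved resource and owns the flexible entries[] array (one entry
 * per page, refcounted via map_count).  Each VMA that maps a window of
 * that range gets its own xen_ia64_privcmd_vma pointing into the shared
 * entries:
 *
 *	range:  [ entry0 | entry1 | entry2 | entry3 | ... ]
 *	vma A:            ^-- num_entries = 2 --^
 *	vma B (after fork/split): may alias the same entries,
 *	                          pinned via ref_count/map_count.
 */
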
static void
xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
{
	atomic_set(&entry->map_count, 0);
	entry->gpfn = INVALID_GPFN;
}

static int
xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
			    unsigned long addr,
			    struct xen_ia64_privcmd_range* privcmd_range,
			    int i,
			    unsigned long mfn,
			    pgprot_t prot,
			    domid_t domid)
{
	int error = 0;
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	unsigned long gpfn;
	unsigned long flags;

	if ((addr & ~PAGE_MASK) != 0 || mfn == INVALID_MFN) {
		error = -EINVAL;
		goto out;
	}

	if (entry->gpfn != INVALID_GPFN) {
		error = -EBUSY;
		goto out;
	}
	gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;

	flags = ASSIGN_writable;
	if (pgprot_val(prot) == PROT_READ) {
		flags = ASSIGN_readonly;
	}
	error = HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
	if (error != 0) {
		goto out;
	}

	prot = vma->vm_page_prot;
	error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
	if (error != 0) {
		error = HYPERVISOR_zap_physmap(gpfn, 0);
		if (error) {
			BUG();//XXX
		}
	} else {
		atomic_inc(&entry->map_count);
		entry->gpfn = gpfn;
	}

out:
	return error;
}

static void
xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
			      int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	unsigned long gpfn = entry->gpfn;
	//gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
	//	(vma->vm_pgoff - privcmd_range->pgoff);
	int error;

	error = HYPERVISOR_zap_physmap(gpfn, 0);
	if (error) {
		BUG();//XXX
	}
	entry->gpfn = INVALID_GPFN;
}

static void
xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
			    int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	if (entry->gpfn != INVALID_GPFN) {
		atomic_inc(&entry->map_count);
	} else {
		BUG_ON(atomic_read(&entry->map_count) != 0);
	}
}

static void
xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
			     int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	if (entry->gpfn != INVALID_GPFN &&
	    atomic_dec_and_test(&entry->map_count)) {
		xen_ia64_privcmd_entry_munmap(privcmd_range, i);
	}
}

static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);

struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
	.open = &xen_ia64_privcmd_vma_open,
	.close = &xen_ia64_privcmd_vma_close,
};
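
/*
 * Lifecycle note: the kernel invokes .open whenever a mapped VMA is
 * duplicated or split (fork, mremap, partial munmap) and .close when a
 * VMA goes away.  The handlers below therefore only adjust the
 * ref_count/map_count bookkeeping; the actual zap of a physmap entry
 * happens when the last mapping of a page is closed.
 */
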
static void
__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
			    struct xen_ia64_privcmd_vma* privcmd_vma,
			    struct xen_ia64_privcmd_range* privcmd_range)
{
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i;

	BUG_ON(entry_offset < 0);
	BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);

	privcmd_vma->range = privcmd_range;
	privcmd_vma->num_entries = num_entries;
	privcmd_vma->entries = &privcmd_range->entries[entry_offset];
	vma->vm_private_data = privcmd_vma;
	for (i = 0; i < privcmd_vma->num_entries; i++) {
		xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
	}

	vma->vm_private_data = privcmd_vma;
	vma->vm_ops = &xen_ia64_privcmd_vm_ops;
}

static void
xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
{
	struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;

	atomic_inc(&privcmd_range->ref_count);
	// vm_op->open() can't fail.
	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
	// copy the original value if necessary
	privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;

	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
}

static void
xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
	unsigned long i;

	for (i = 0; i < privcmd_vma->num_entries; i++) {
		xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
	}
	vma->vm_private_data = NULL;
	kfree(privcmd_vma);

	if (atomic_dec_and_test(&privcmd_range->ref_count)) {
		for (i = 0; i < privcmd_range->num_entries; i++) {
			struct xen_ia64_privcmd_entry* entry =
				&privcmd_range->entries[i];
			BUG_ON(atomic_read(&entry->map_count) != 0);
			BUG_ON(entry->gpfn != INVALID_GPFN);
		}

		release_resource(privcmd_range->res);
		kfree(privcmd_range->res);
		vfree(privcmd_range);
	}
}

int
privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma *)vma->vm_private_data;
	return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
}
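
/*
 * The xchg() makes the check-and-set atomic: the first caller reads 0,
 * stores 1, and gets "true"; every later (or concurrent) caller reads 1
 * and gets "false", so a privcmd VMA can only be populated once.
 */
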
int
privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
	int error;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long num_entries = size >> PAGE_SHIFT;
	struct xen_ia64_privcmd_range* privcmd_range = NULL;
	struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
	struct resource* res = NULL;
	unsigned long i;
	BUG_ON(!is_running_on_xen());

	BUG_ON(file->private_data != NULL);

	error = -ENOMEM;
	privcmd_range =
		vmalloc(sizeof(*privcmd_range) +
			sizeof(privcmd_range->entries[0]) * num_entries);
	if (privcmd_range == NULL) {
		goto out_enomem0;
	}
	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
	if (privcmd_vma == NULL) {
		goto out_enomem1;
	}
	privcmd_vma->is_privcmd_mmapped = 0;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (res == NULL) {
		goto out_enomem1;
	}
	res->name = "Xen privcmd mmap";
	error = allocate_resource(&iomem_resource, res, size,
				  privcmd_resource_min, privcmd_resource_max,
				  privcmd_resource_align, NULL, NULL);
	if (error) {
		goto out_enomem1;
	}
	privcmd_range->res = res;

	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;

	atomic_set(&privcmd_range->ref_count, 1);
	privcmd_range->pgoff = vma->vm_pgoff;
	privcmd_range->num_entries = num_entries;
	for (i = 0; i < privcmd_range->num_entries; i++) {
		xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
	}

	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
	return 0;

out_enomem1:
	kfree(res);
	kfree(privcmd_vma);
out_enomem0:
	vfree(privcmd_range);
	return error;
}
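
/*
 * Hedged call-path sketch: user space opens the privcmd device, mmap()s
 * a window (which lands here and reserves a pseudo-physical range),
 * then issues an IOCTL_PRIVCMD_MMAP-style request; the generic privcmd
 * driver resolves that to direct_remap_pfn_range() below, which assigns
 * each foreign mfn a gpfn from the reserved range and maps it into the
 * VMA one page at a time.
 */
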
int
direct_remap_pfn_range(struct vm_area_struct *vma,
		       unsigned long address,	// process virtual address
		       unsigned long mfn,	// mfn, mfn + 1, ... mfn + size/PAGE_SIZE
		       unsigned long size,	// size in bytes
		       pgprot_t prot,
		       domid_t  domid)		// target domain
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;

	unsigned long i;
	unsigned long offset;
	int error = 0;
	BUG_ON(!is_running_on_xen());

#if 0
	if (pgprot_val(prot) != pgprot_val(vma->vm_page_prot)) {
		return -EINVAL;
	}
#endif

	i = (address - vma->vm_start) >> PAGE_SHIFT;
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, mfn, prot, domid);
		if (error != 0) {
			break;
		}

		i++;
		mfn++;
	}

	return error;
}

/* Called after suspend, to resume time. */
void
time_resume(void)
{
	extern void ia64_cpu_local_tick(void);

	/* Just trigger a tick. */
	ia64_cpu_local_tick();
}