...
 
......@@ -13,6 +13,7 @@ VERSION_CMD=$(PWD)/version
obj-m += pisces.o
pisces-objs := src/main.o \
src/compat.o \
src/pisces_boot_params.o \
src/boot.o \
src/covirt.o \
......@@ -29,7 +30,13 @@ pisces-objs := src/main.o \
src/pgtables.o \
src/util-hashtable.o \
src/util-queue.o \
src/v3_console.o
src/v3_console.o \
src/vmx/vmx.o \
src/vmx/vmcs.o \
src/vmx/vmx_lowlevel.o \
src/vmx/segments.o \
src/vmx/vmx_hw_info.o
ifneq ($(XPMEM),n)
EXTRA_CFLAGS += -I$(XPMEM_PATH)/include -DUSING_XPMEM
......
......@@ -572,7 +572,7 @@ boot_enclave(struct pisces_enclave * enclave)
enclave->bootmem_addr_pa >> PAGE_SHIFT);
} else {
set_enclave_launch_args(enclave,
boot_params->covirt_addr,
boot_params->covirt_launch_addr,
enclave->bootmem_addr_pa >> PAGE_SHIFT);
}
......
......@@ -6,11 +6,11 @@
* redistribute, and modify it as specified in the file "PETLAB_LICENSE".
*/
#include "vos.h"
#include "pisces.h"
uintptr_t
vos_alloc_pages(gfp_t gfp_mask,
pisces_alloc_pages(gfp_t gfp_mask,
u32 order)
{
struct page * pgs = alloc_pages(gfp_mask, order);
......@@ -25,7 +25,7 @@ vos_alloc_pages(gfp_t gfp_mask,
void
vos_free_pages(uintptr_t addr,
pisces_free_pages(uintptr_t addr,
u32 order)
{
free_pages((uintptr_t)__va(addr), order);
......@@ -33,7 +33,7 @@ vos_free_pages(uintptr_t addr,
void *
vos_kmalloc(size_t size,
pisces_kmalloc(size_t size,
gfp_t flags)
{
return kmalloc(size, flags);
......@@ -42,7 +42,7 @@ vos_kmalloc(size_t size,
void
vos_kfree(void * ptr)
pisces_kfree(void * ptr)
{
kfree(ptr);
}
......@@ -13,34 +13,40 @@
#include "covirt.h"
#include "file_io.h"
#include "enclave.h"
#include "vmx/vmx.h"
static int vmm_loaded = 0;
static void * vmm_binary = NULL;
static u32 vmm_binary_size = 0;
static int vmm_loaded = 0;
static void * vmm_binary = NULL;
static u32 vmm_binary_size = 0;
static void * launch_binary = NULL;
static u32 launch_binary_size = 0;
int
covirt_load_vmm(int vmm_fd)
covirt_load_vmm(int launch_fd)
{
struct file * vmm_file = fget(vmm_fd);
struct file * launch_file = fget(launch_fd);
u64 bytes_read = 0;
int ret = 0;
vmm_binary_size = file_size(vmm_file);
vmm_binary = kmalloc(vmm_binary_size, GFP_KERNEL);
launch_binary_size = file_size(launch_file);
launch_binary = kmalloc(launch_binary_size, GFP_KERNEL);
while (bytes_read < vmm_binary_size) {
while (bytes_read < launch_binary_size) {
ret = file_read(vmm_file,
(void *)(vmm_binary + bytes_read),
vmm_binary_size - bytes_read,
ret = file_read(launch_file,
(void *)(launch_binary + bytes_read),
launch_binary_size - bytes_read,
bytes_read);
if (ret <= 0) {
printk(KERN_ERR "Error reading VMM binary. Only read %llu bytes.\n",
ERROR("Error reading Launch binary. Only read %llu bytes.\n",
bytes_read);
goto err;
......@@ -50,40 +56,70 @@ covirt_load_vmm(int vmm_fd)
}
fput(vmm_file);
fput(launch_file);
/* Go ahead and initialize VMX at this point */
covirt_init_vmx();
vmm_loaded = 1;
return 0;
err:
if (vmm_binary) kfree(vmm_binary);
if (launch_binary) kfree(launch_binary);
vmm_binary_size = 0;
launch_binary_size = 0;
return -1;
}
u32
covirt_get_vmm_size(void)
covirt_get_launch_size(void)
{
if (vmm_loaded == 0) {
return 0;
}
return vmm_binary_size;
return launch_binary_size;
}
uintptr_t
covirt_get_vmm_addr(void)
covirt_get_launch_addr(void)
{
if (vmm_loaded == 0) {
return (uintptr_t)0;
}
return (uintptr_t)vmm_binary;
return (uintptr_t)launch_binary;
}
int
setup_covirt(struct pisces_enclave * enclave,
u32 cpu_id)
{
struct covirt_core_state * core_state = &(enclave->covirt_cores[cpu_id]);
int ret = 0;
if (covirt_is_vmx_capable() == 0) {
ERROR("System is not VMX capable\n");
return -1;
}
ret = covirt_setup_vmx_core(core_state);
if (ret == -1) {
ERROR("Could not setup VMX\n");
return -1;
}
return 0;
}
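For orientation, the pieces above fit together roughly as follows: covirt_load_vmm() reads the launch binary into kernel memory and calls covirt_init_vmx(); setup_boot_params() later copies the image into enclave boot memory through covirt_get_launch_size()/covirt_get_launch_addr(); and setup_covirt() prepares per-core VMX state from pisces_enclave_launch(). A minimal caller sketch follows; the wrapper name and the origin of the file descriptor are assumptions, not part of this diff:
/* Hypothetical driver-side helper -- sketch only */
static int
covirt_prepare(struct pisces_enclave * enclave,
               int                     launch_fd)
{
    /* Pull the launch image into kernel memory; this also runs covirt_init_vmx() */
    if (covirt_load_vmm(launch_fd) != 0) {
        ERROR("Could not load Covirt launch binary\n");
        return -1;
    }
    pisces_printk("Launch image staged: %u bytes at %p\n",
                  covirt_get_launch_size(),
                  (void *)covirt_get_launch_addr());
    /* Normally done from pisces_enclave_launch() for boot CPU 0 */
    return setup_covirt(enclave, 0);
}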
......@@ -12,14 +12,26 @@
#include <linux/types.h>
#include "pisces.h"
struct pisces_enclave;
struct covirt_core_state {
u64 guest_vmcs_ptr;
u64 host_vmcs_ptr;
} __attribute__((packed));
int
covirt_load_vmm(int vmm_fd);
u32 covirt_get_vmm_size(void);
uintptr_t covirt_get_vmm_addr(void);
u32 covirt_get_launch_size(void);
uintptr_t covirt_get_launch_addr(void);
int setup_covirt(struct pisces_enclave * enclave, u32 cpu_id);
#endif
......
......@@ -20,6 +20,7 @@
#include <linux/version.h>
#include <linux/pci.h>
#include "pisces.h"
#include "pisces_ioctl.h"
#include "enclave.h"
#include "enclave_ctrl.h"
......@@ -27,6 +28,7 @@
#include "pisces_boot_params.h"
#include "pisces_xpmem.h"
#include "covirt.h"
#include "pgtables.h"
......@@ -627,6 +629,15 @@ static int
pisces_enclave_launch(struct pisces_enclave * enclave)
{
if (enclave->covirt_enabled) {
if (setup_covirt(enclave, 0) == -1) {
printk(KERN_ERR "Error Setting up Covirt environment\n");
return -1;
}
}
if (setup_boot_params(enclave) == -1) {
printk(KERN_ERR "Error setting up boot environment\n");
return -1;
......
......@@ -22,6 +22,7 @@
#include "enclave_fs.h"
#include "enclave_pci.h"
#include "enclave_iommu.h"
#include "covirt.h"
#define ENCLAVE_LOADED 1
#define ENCLAVE_RUNNING 2
......@@ -74,6 +75,8 @@ struct pisces_enclave {
u32 memdesc_num;
struct covirt_core_state covirt_cores[MAX_ENCLAVE_CPUS];
int covirt_enabled;
};
......
......@@ -21,6 +21,7 @@
#include <linux/version.h>
#include <linux/uaccess.h>
#include "pisces.h"
#include "pisces_ioctl.h" /* device file ioctls*/
#include "pisces_mod.h"
#include "enclave.h"
......
/*
* Copyright (c) 2019, Jack Lange <jacklange@cs.pitt.edu>
* All rights reserved.
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "PETLAB_LICENSE".
*/
#ifndef _PISCES_H_
#define _PISCES_H_
#include <linux/slab.h>
#include <linux/mm.h>
#define MAX_ENCLAVES 128
#define MAX_ENCLAVE_CPUS 32
uintptr_t pisces_alloc_pages(gfp_t gfp_mask, u32 order);
void pisces_free_pages(uintptr_t addr, u32 order);
void * pisces_kmalloc(size_t size, gfp_t flags);
void pisces_kfree(void * ptr);
// The following macros are for printing in the Linux module
#define pisces_printk(fmt, args...) \
do { \
printk("PISCES>: " fmt, ##args); \
} while (0)
#define ERROR(fmt, args...) \
do { \
printk(KERN_ERR "PISCSE> %s(%d): " fmt, __FILE__, __LINE__, ##args); \
} while (0)
#define WARNING(fmt, args...) \
do { \
printk(KERN_WARNING "PISCES>: " fmt, ##args); \
} while (0)
#define DEBUG(fmt, args...) \
do { \
printk(KERN_DEBUG "PISCES>: " fmt, ##args); \
} while (0)
#endif
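A minimal usage sketch of the wrappers declared above. pisces_alloc_pages() appears to hand back a physical address (pisces_free_pages() converts it with __va() before calling free_pages()), so the __va() mapping and the zeroing below are illustrative assumptions, not something this diff shows:
static void
pisces_mem_example(void)
{
    uintptr_t scratch_pa = pisces_alloc_pages(GFP_KERNEL, 0);  /* order 0 => one 4KB page */
    void    * scratch_va = __va(scratch_pa);                   /* PA -> kernel VA */
    void    * buf        = pisces_kmalloc(128, GFP_KERNEL);
    memset(scratch_va, 0, PAGE_SIZE);
    DEBUG("Scratch page at PA %p\n", (void *)scratch_pa);
    if (buf == NULL) {
        ERROR("Out of memory\n");
    } else {
        pisces_kfree(buf);
    }
    pisces_free_pages(scratch_pa, 0);
}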
......@@ -279,6 +279,7 @@ setup_boot_params(struct pisces_enclave * enclave)
/*
* Covirt launch code
*/
......@@ -286,18 +287,19 @@ setup_boot_params(struct pisces_enclave * enclave)
if (enclave->covirt_enabled) {
offset = ALIGN(offset, PAGE_SIZE_4KB);
boot_params->covirt_addr = __pa(base_addr + offset);
boot_params->covirt_size = covirt_get_vmm_size();
boot_params->covirt_launch_addr = __pa(base_addr + offset);
boot_params->covirt_launch_size = covirt_get_launch_size();
memcpy((void *)(__va(boot_params->covirt_addr)),
(void *)covirt_get_vmm_addr(),
boot_params->covirt_size);
memcpy((void *)(__va(boot_params->covirt_launch_addr)),
(void *)covirt_get_launch_addr(),
boot_params->covirt_launch_size);
offset += PAGE_SIZE_4KB;
printk("\tCovirt image loaded at %p (size=%llu)\n",
(void *)(boot_params->covirt_addr),
boot_params->covirt_size);
printk("\tCovirt Launcher loaded at %p (size=%llu)\n",
(void *)(boot_params->covirt_launch_addr),
boot_params->covirt_launch_size);
}
......
......@@ -27,8 +27,9 @@ struct pisces_enclave;
* 1. boot parameters // 4KB aligned
* -> Trampoline code sits at the start of this structure
* 2. Console ring buffer (64KB) // 4KB aligned
* 3. To enclave CMD buffer // (4KB)
* 4. From enclave CMD buffer // (4KB)
* 3. CMD/CTRL buffer // (4KB)
* 4. LongCall buffer // (4KB)
* 5. XPMEM buffer // (4KB)
* 6. Covirt Launch code // (4KB)
* 7. kernel image // bootmem + 2MB (MUST be loaded at the 2MB offset)
* 8. initrd // 2M aligned
......@@ -80,9 +81,10 @@ struct pisces_boot_params {
// cmd_line
char cmd_line[1024];
// covirt launch code
u64 covirt_addr;
u64 covirt_size;
u64 covirt_launch_addr;
u64 covirt_launch_size;
// kernel
u64 kernel_addr;
......
......@@ -6,8 +6,8 @@
* redistribute, and modify it as specified in the file "PETLAB_LICENSE".
*/
#ifndef _PISCES_H_
#define _PISCES_H_
#ifndef _PISCES_IOCTL_H_
#define _PISCES_IOCTL_H_
/*
* Name of the device file
......@@ -15,7 +15,6 @@
#define DEVICE_NAME "pisces"
#define PISCES_PROC_DIR "pisces"
#define MAX_ENCLAVES 128
/* Pisces global cmds */
#define PISCES_LOAD_IMAGE 1001
......
......@@ -7,316 +7,43 @@
*/
#include "segments.h"
#include "vmm_lowlevel.h"
#include "../pisces.h"
static void
vos_print_segment(char * name,
struct vos_segment * seg)
covirt_print_segment(char * name,
struct vmx_segment * seg)
{
vos_lnx_printk("\t%s: Sel=%x, base=%p, limit=%x long_mode=%d, db=%d, type=%x )\n",
name,
seg->selector,
(void *)(uintptr_t)seg->base,
seg->limit,
seg->long_mode,
seg->db,
seg->type);
vos_lnx_printk("\t\tSys=%d, dpl=%x, P=%d, avail=%d, gran.=%d, unusable=%d\n",
seg->system,
seg->dpl,
seg->present,
seg->avail,
seg->granularity,
seg->unusable);
DEBUG("\t%s: Sel=%x, base=%p, limit=%x long_mode=%d, db=%d, type=%x )\n",
name,
seg->selector,
(void *)(uintptr_t)seg->base,
seg->limit,
seg->long_mode,
seg->db,
seg->type);
DEBUG("\t\tSys=%d, dpl=%x, P=%d, avail=%d, gran.=%d, unusable=%d\n",
seg->system,
seg->dpl,
seg->present,
seg->avail,
seg->granularity,
seg->unusable);
}
void
vos_print_segments(struct vos_segments * segs)
covirt_print_segments(struct vmx_segments * segs)
{
struct vos_segment * seg_ptr = (struct vos_segment *)segs;
struct vmx_segment * seg_ptr = (struct vmx_segment *)segs;
char * seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
int i = 0;
vos_lnx_printk("Segments\n");
DEBUG("Segments\n");
for (i = 0; seg_names[i] != NULL; i++) {
vos_print_segment(seg_names[i], &seg_ptr[i]);
}
}
static int
parse_seg_desc(struct x86_segment * seg,
struct vos_segment * vos_seg)
{
uintptr_t base = seg->base_lo + (seg->base_hi << 24);
uintptr_t limit = seg->limit_lo + (seg->limit_hi << 16);
vos_seg->base = base;
vos_seg->limit = limit;
vos_seg->type = seg->type;
vos_seg->system = seg->system;
vos_seg->dpl = seg->dpl;
vos_seg->present = seg->present;
vos_seg->avail = seg->avail;
vos_seg->long_mode = seg->long_mode;
vos_seg->db = seg->db;
vos_seg->granularity = seg->granularity;
if (vos_seg->present == 0) {
vos_lnx_printk("Marking Segment unusable\n");
vos_seg->unusable = 1;
}
return 0;
}
static int
parse_ext_seg_desc(struct x86_ext_segment * seg,
struct vos_segment * vos_seg)
{
u64 base = 0;
uintptr_t limit = seg->limit_lo | (seg->limit_hi << 16);
base = seg->base_hi;
base <<= 24;
base |= seg->base_lo;
vos_seg->base = base;
vos_seg->limit = limit;
vos_seg->type = seg->type;
vos_seg->system = seg->system;
vos_seg->dpl = seg->dpl;
vos_seg->present = seg->present;
vos_seg->avail = seg->avail;
vos_seg->long_mode = seg->long_mode;
vos_seg->db = seg->db;
vos_seg->granularity = seg->granularity;
if (vos_seg->present == 0) {
vos_lnx_printk("Marking Segment unusable\n");
vos_seg->unusable = 1;
covirt_print_segment(seg_names[i], &seg_ptr[i]);
}
return 0;
}
int
vos_get_host_segments(struct vos_segments * segs)
{
uintptr_t selector_val = 0;
struct x86_seg_selector * selector = (struct x86_seg_selector *)&selector_val;
struct x86_segment * gdt_arr = NULL;
struct x86_segment * ldt_arr = NULL;
struct {
u16 limit;
uintptr_t base;
} __attribute__((packed)) tmp_seg;
memset(segs, 0, sizeof(struct vos_segments));
vos_lnx_printk("Host Segments\n");
/* GDTR */
__asm__ __volatile__("sgdt (%0);"
:
: "q"(&tmp_seg)
: "memory"
);
segs->gdtr.base = tmp_seg.base;
segs->gdtr.limit = tmp_seg.limit;
gdt_arr = (struct x86_segment *)(segs->gdtr.base);
vos_lnx_printk("GDTR: [base=%p] [limit=%d]\n", (void *)segs->gdtr.base, segs->gdtr.limit);
/* IDTR */
__asm__ __volatile__("sidt (%0);"
:
: "q"(&tmp_seg)
: "memory"
);
segs->idtr.base = tmp_seg.base;
segs->idtr.limit = tmp_seg.limit;
vos_lnx_printk("IDTR: [base=%p] [limit=%d]\n", (void *)segs->idtr.base, segs->idtr.limit);
/* LDTR */
__asm__ __volatile__("sldt (%0);"
:
: "q"(&selector_val)
: "memory"
);
if (selector_val != 0) {
segs->ldtr.selector = selector_val;
parse_ext_seg_desc((struct x86_ext_segment *)&gdt_arr[selector->index], &(segs->ldtr));
ldt_arr = (struct x86_segment *)(segs->ldtr.base);
} else {
segs->ldtr.unusable = 1;
}
vos_lnx_printk("LDTR: [base=%p]\n", (void *)segs->ldtr.base);
/* TR */
__asm__ __volatile__("str (%0);"
:
: "q"(&selector_val)
: "memory"
);
if (selector_val != 0) {
segs->tr.selector = selector_val;
if (selector->tbl_index == 1) {
parse_ext_seg_desc((struct x86_ext_segment *)&ldt_arr[selector->index], &(segs->tr));
} else {
parse_ext_seg_desc((struct x86_ext_segment *)&gdt_arr[selector->index], &(segs->tr));
}
} else {
segs->tr.unusable = 1;
}
vos_lnx_printk("TR: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->tr.base);
/* CS */
__asm__ __volatile__ ("movq %%cs, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->cs.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->cs));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->cs));
}
} else {
segs->cs.unusable = 1;
}
vos_lnx_printk("CS: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->cs.base);
/* SS */
__asm__ __volatile__ ( "movq %%ss, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->ss.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->ss));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->ss));
}
} else {
segs->ss.unusable = 1;
}
vos_lnx_printk("SS: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->ss.base);
/* DS */
__asm__ __volatile__ ( "movq %%ds, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->ds.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->ds));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->ds));
}
} else {
segs->ds.unusable = 1;
}
vos_lnx_printk("DS: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->ds.base);
/* ES */
__asm__ __volatile__ ( "movq %%es, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->es.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->es));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->es));
}
} else {
segs->es.unusable = 1;
}
vos_lnx_printk("ES: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->es.base);
/* FS */
__asm__ __volatile__ ( "movq %%fs, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->fs.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->fs));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->fs));
}
} else {
segs->fs.unusable = 1;
}
vos_lnx_printk("FS: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->fs.base);
/* GS */
__asm__ __volatile__ ( "movq %%gs, %0; "
: "=q"(selector_val)
: );
if (selector_val != 0) {
segs->gs.selector = selector_val;
if (selector->tbl_index == 1) {
parse_seg_desc(&ldt_arr[selector->index], &(segs->gs));
} else {
parse_seg_desc(&gdt_arr[selector->index], &(segs->gs));
}
} else {
segs->gs.unusable = 1;
}
vos_lnx_printk("GS: [selector=%d] [base=%p]\n", (u32)selector_val, (void *)segs->gs.base);
vos_print_segments(segs);
return 0;
}
......@@ -9,11 +9,15 @@
#ifndef __SEGMENTS_H__
#define __SEGMENTS_H__
#include <linux/types.h>
#include "vos.h"
#define VMM_NULL_SELECTOR 0
struct vos_segment {
struct vmx_segment {
u16 selector;
u32 limit;
u64 base;
......@@ -30,21 +34,21 @@ struct vos_segment {
struct vos_segments {
struct vmx_segments {
union {
struct vos_segment seg_arr[10];
struct vmx_segment seg_arr[10];
struct {
struct vos_segment cs;
struct vos_segment ds;
struct vos_segment es;
struct vos_segment fs;
struct vos_segment gs;
struct vos_segment ss;
struct vos_segment ldtr;
struct vos_segment gdtr;
struct vos_segment idtr;
struct vos_segment tr;
struct vmx_segment cs;
struct vmx_segment ds;
struct vmx_segment es;
struct vmx_segment fs;
struct vmx_segment gs;
struct vmx_segment ss;
struct vmx_segment ldtr;
struct vmx_segment gdtr;
struct vmx_segment idtr;
struct vmx_segment tr;
} __attribute__((packed));
} __attribute__((packed));
} __attribute__((packed));
......@@ -54,8 +58,7 @@ struct vos_segments {
int vos_get_host_segments(struct vos_segments * segs);
void vos_print_segments(struct vos_segments * segs);
void covirt_print_segments(struct vmx_segments * segs);
#endif
......@@ -31,8 +31,8 @@ typedef enum { BASE = VMCS_GUEST_ES_BASE,
static int
vos_read_vmcs_segment(struct vos_segment * seg,
vmcs_seg_offsets_t seg_type)
covirt_read_vmcs_segment(struct vmx_segment * seg,
vmcs_seg_offsets_t seg_type)
{
vmcs_field_t selector = VMCS_GUEST_ES_SELECTOR + seg_type;
vmcs_field_t base = VMCS_GUEST_ES_BASE + seg_type;
......@@ -50,14 +50,14 @@ vos_read_vmcs_segment(struct vos_segment * seg,
check_vmcs_read(access, &(vmcs_seg.access.val));
}
vos_vmcsseg_to_seg(&vmcs_seg, seg);
covirt_vmcsseg_to_seg(&vmcs_seg, seg);
return 0;
}
static int
vos_write_vmcs_segment(struct vos_segment * seg,
vmcs_seg_offsets_t seg_type)
covirt_write_vmcs_segment(struct vmx_segment * seg,
vmcs_seg_offsets_t seg_type)
{
vmcs_field_t selector = VMCS_GUEST_ES_SELECTOR + seg_type;
vmcs_field_t base = VMCS_GUEST_ES_BASE + seg_type;
......@@ -65,7 +65,7 @@ vos_write_vmcs_segment(struct vos_segment * seg,
vmcs_field_t access = VMCS_GUEST_ES_ACCESS + seg_type;
struct vmcs_segment vmcs_seg;
vos_seg_to_vmcsseg(seg, &vmcs_seg);
covirt_seg_to_vmcsseg(seg, &vmcs_seg);
check_vmcs_write(limit, vmcs_seg.limit);
check_vmcs_write(base, vmcs_seg.base);
......@@ -79,45 +79,45 @@ vos_write_vmcs_segment(struct vos_segment * seg,
}
int
vos_read_vmcs_segments(struct vos_segments * segs)
covirt_read_vmcs_segments(struct vmx_segments * segs)
{
vos_read_vmcs_segment(&(segs->cs), CS);
vos_read_vmcs_segment(&(segs->ds), DS);
vos_read_vmcs_segment(&(segs->es), ES);
vos_read_vmcs_segment(&(segs->fs), FS);
vos_read_vmcs_segment(&(segs->gs), GS);
vos_read_vmcs_segment(&(segs->ss), SS);
vos_read_vmcs_segment(&(segs->ldtr), LDTR);
vos_read_vmcs_segment(&(segs->gdtr), GDTR);
vos_read_vmcs_segment(&(segs->idtr), IDTR);
vos_read_vmcs_segment(&(segs->tr), TR);
covirt_read_vmcs_segment(&(segs->cs), CS);
covirt_read_vmcs_segment(&(segs->ds), DS);
covirt_read_vmcs_segment(&(segs->es), ES);
covirt_read_vmcs_segment(&(segs->fs), FS);
covirt_read_vmcs_segment(&(segs->gs), GS);
covirt_read_vmcs_segment(&(segs->ss), SS);
covirt_read_vmcs_segment(&(segs->ldtr), LDTR);
covirt_read_vmcs_segment(&(segs->gdtr), GDTR);
covirt_read_vmcs_segment(&(segs->idtr), IDTR);
covirt_read_vmcs_segment(&(segs->tr), TR);
return 0;
}
int
vos_write_vmcs_segments(struct vos_segments * segs)
covirt_write_vmcs_segments(struct vmx_segments * segs)
{
vos_write_vmcs_segment(&(segs->cs), CS);
vos_write_vmcs_segment(&(segs->ds), DS);
vos_write_vmcs_segment(&(segs->es), ES);
vos_write_vmcs_segment(&(segs->fs), FS);
vos_write_vmcs_segment(&(segs->gs), GS);
vos_write_vmcs_segment(&(segs->ss), SS);
vos_write_vmcs_segment(&(segs->ldtr), LDTR);
vos_write_vmcs_segment(&(segs->gdtr), GDTR);
vos_write_vmcs_segment(&(segs->idtr), IDTR);
vos_write_vmcs_segment(&(segs->tr), TR);
covirt_write_vmcs_segment(&(segs->cs), CS);
covirt_write_vmcs_segment(&(segs->ds), DS);
covirt_write_vmcs_segment(&(segs->es), ES);
covirt_write_vmcs_segment(&(segs->fs), FS);
covirt_write_vmcs_segment(&(segs->gs), GS);
covirt_write_vmcs_segment(&(segs->ss), SS);
covirt_write_vmcs_segment(&(segs->ldtr), LDTR);
covirt_write_vmcs_segment(&(segs->gdtr), GDTR);
covirt_write_vmcs_segment(&(segs->idtr), IDTR);
covirt_write_vmcs_segment(&(segs->tr), TR);
return 0;
}
void
vos_vmcsseg_to_seg(struct vmcs_segment * vmcs_seg,
struct vos_segment * seg)
covirt_vmcsseg_to_seg(struct vmcs_segment * vmcs_seg,
struct vmx_segment * seg)
{
memset(seg, 0, sizeof(struct vos_segment));
memset(seg, 0, sizeof(struct vmx_segment));
seg->selector = vmcs_seg->selector;
seg->limit = vmcs_seg->limit;
......@@ -136,8 +136,8 @@ vos_vmcsseg_to_seg(struct vmcs_segment * vmcs_seg,
}
void
vos_seg_to_vmcsseg(struct vos_segment * seg,
struct vmcs_segment * vmcs_seg)
covirt_seg_to_vmcsseg(struct vmx_segment * seg,
struct vmcs_segment * vmcs_seg)
{
memset(vmcs_seg, 0, sizeof(struct vmcs_segment));
......@@ -168,20 +168,20 @@ vos_seg_to_vmcsseg(struct vos_segment * seg,
static inline void
print_vmcs_field(vmcs_field_t vmcs_index)
{
int len = vos_vmcs_get_field_len(vmcs_index);
int len = covirt_vmcs_get_field_len(vmcs_index);
uintptr_t val;
if (vmcs_read(vmcs_index, &val) != VMX_SUCCESS) {
ERROR("VMCS_READ error for %s\n", vos_vmcs_field_to_str(vmcs_index));
ERROR("VMCS_READ error for %s\n", covirt_vmcs_field_to_str(vmcs_index));
return;
};
if (len == 2) {
DEBUG("\t%s: 0x%.4x\n", vos_vmcs_field_to_str(vmcs_index), (u16)val);
DEBUG("\t%s: 0x%.4x\n", covirt_vmcs_field_to_str(vmcs_index), (u16)val);
} else if (len == 4) {
DEBUG("\t%s: 0x%.8x\n", vos_vmcs_field_to_str(vmcs_index), (u32)val);
DEBUG("\t%s: 0x%.8x\n", covirt_vmcs_field_to_str(vmcs_index), (u32)val);
} else if (len == 8) {
DEBUG("\t%s: 0x%p\n", vos_vmcs_field_to_str(vmcs_index), (void *)(uintptr_t)val);
DEBUG("\t%s: 0x%p\n", covirt_vmcs_field_to_str(vmcs_index), (void *)(uintptr_t)val);
}
}
......@@ -411,7 +411,7 @@ print_exec_ctrls(void)
static void
print_ept_state(void)
{
vos_lnx_printk("VMCS EPT INFO\n");
pisces_printk("VMCS EPT INFO\n");
// if enable vpid
print_vmcs_field(VMCS_VPID);
......@@ -495,7 +495,7 @@ print_exit_info(void)
}
void
vos_print_vmcs(void)
covirt_print_vmcs(void)
{
print_vmcs_field(VMCS_LINK_PTR);
......@@ -518,7 +518,7 @@ vos_print_vmcs(void)
* It doesn't get much uglier than this... Thanks Intel
*/
int
vos_vmcs_get_field_len(vmcs_field_t field)
covirt_vmcs_get_field_len(vmcs_field_t field)
{
struct vmcs_field_encoding * enc = (struct vmcs_field_encoding *)&field;
......@@ -716,7 +716,7 @@ static const char VMCS_HOST_RIP_STR[] = "HOST_RIP";
const char *
vos_vmcs_field_to_str(vmcs_field_t field)
covirt_vmcs_field_to_str(vmcs_field_t field)
{
switch (field) {
case VMCS_VPID: return VMCS_VPID_STR;
......
......@@ -10,7 +10,8 @@
#define __VMCS_H__
#include "vos.h"
#include <linux/types.h>
#include "segments.h"
/* VM-Exit Controls */
......@@ -354,20 +355,18 @@ struct vmcs_data {
} __attribute__((packed));
struct vos_core;
int vos_vmcs_get_field_len(vmcs_field_t field);
int covirt_vmcs_get_field_len(vmcs_field_t field);
const char * vos_vmcs_field_to_str(vmcs_field_t field);
const char * covirt_vmcs_field_to_str(vmcs_field_t field);
void vos_print_vmcs(void);
void covirt_print_vmcs(void);
int vos_read_vmcs_segments(struct vos_segments * segs);
int vos_write_vmcs_segments(struct vos_segments * segs);
int covirt_read_vmcs_segments(struct vmx_segments * segs);
int covirt_write_vmcs_segments(struct vmx_segments * segs);
void vos_vmcsseg_to_seg(struct vmcs_segment * vmcs_seg, struct vos_segment * seg);
void vos_seg_to_vmcsseg(struct vos_segment * seg, struct vmcs_segment * vmcs_seg);
void covirt_vmcsseg_to_seg(struct vmcs_segment * vmcs_seg, struct vmx_segment * seg);
void covirt_seg_to_vmcsseg(struct vmx_segment * seg, struct vmcs_segment * vmcs_seg);
#endif
......@@ -5,11 +5,12 @@
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "PETLAB_LICENSE".
*/
#include "vos.h"
#ifndef __VMM_LOWLEVEL_H__
#define __VMM_LOWLEVEL_H__
#include <linux/types.h>
#define CPUID_FEATURE_IDS 0x00000001
#define CPUID_EXT_FEATURE_IDS 0x80000001
......@@ -184,11 +185,11 @@ struct x86_intr_gate {
static void __inline__
vos_cpuid(u32 target,
u32 * eax,
u32 * ebx,
u32 * ecx,
u32 * edx)
covirt_cpuid(u32 target,
u32 * eax,
u32 * ebx,
u32 * ecx,
u32 * edx)
{
__asm__ __volatile__ (
......@@ -201,9 +202,9 @@ vos_cpuid(u32 target,
static void __inline__
vos_set_msr(u32 msr,
u32 high_byte,
u32 low_byte)
covirt_set_msr(u32 msr,
u32 high_byte,
u32 low_byte)
{
__asm__ __volatile__ (
"wrmsr"
......@@ -215,9 +216,9 @@ vos_set_msr(u32 msr,
static void __inline__
vos_get_msr(u32 msr,
u32 * high_byte,
u32 * low_byte)
covirt_get_msr(u32 msr,
u32 * high_byte,
u32 * low_byte)
{
__asm__ __volatile__ (
"rdmsr"
......@@ -228,49 +229,7 @@ vos_get_msr(u32 msr,
static void __inline__
vos_enable_ints( void )
{
__asm__ __volatile__ ("sti");
}
static void __inline__
vos_disable_ints( void )
{
__asm__ __volatile__ ("cli");
}
static uintptr_t __inline__
vos_irq_save( void )
{
uintptr_t state;
__asm__ __volatile__ ("pushfq \n\t"
"popq %0 \n\t"
"cli \n\t"
:"=g" (state)
:
:"memory"
);
return state;
}
static void __inline__
vos_irq_restore(uintptr_t state)
{
__asm__ __volatile__("pushq %0 \n\t"
"popfq \n\t"
:
:"g" (state)
:"memory", "cc"
);
}
......
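As a small example of the renamed helpers, reading the VMX basic-capability MSR (VMX_BASIC_MSR, 0x480, defined in vmx_hw_info.h below) yields the two 32-bit halves that covirt_get_msr() exposes; reassembling the u64 and pulling out the VMCS revision identifier (bits 30:0) is shown here purely for illustration:
static void
vmx_basic_example(void)
{
    u32 lo = 0;
    u32 hi = 0;
    u64 basic;
    covirt_get_msr(VMX_BASIC_MSR, &hi, &lo);   /* rdmsr: edx -> hi, eax -> lo */
    basic = ((u64)hi << 32) | lo;
    DEBUG("IA32_VMX_BASIC = %llx (VMCS revision id = 0x%x)\n",
          basic, lo & 0x7fffffff);
}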
......@@ -10,13 +10,11 @@
#ifndef __VMX_H__
#define __VMX_H__
#include "vos.h"
#include "vmcs.h"
#include "vmx_lowlevel.h"
#include "../pisces.h"
struct vos_core;
struct vos_vmm;
struct vos_vm;
#define VMX_SUCCESS 0
#define VMX_FAIL_INVALID 1
......@@ -160,25 +158,6 @@ struct vmx_data {
#define VMX_SUCCESS 0
#define VMX_FAIL_INVALID 1
#define VMX_FAIL_VALID 2
int enable_vmx(void);
int vmx_on(uintptr_t vmxon_ptr);
int vmx_off(void);
int vmcs_clear(uintptr_t vmcs_ptr);
uint64_t vmcs_store(void);
int vmcs_load(uintptr_t vmcs_ptr);
int vmcs_read(vmcs_field_t vmcs_field, void * dst);
int vmcs_write(vmcs_field_t vmcs_field, uintptr_t value);
static inline int
check_vmcs_write(vmcs_field_t field,
......@@ -189,7 +168,7 @@ check_vmcs_write(vmcs_field_t field,
ret = vmcs_write(field, val);
if (ret != VMX_SUCCESS) {
ERROR("VMWRITE error on %s!: %d\n", vos_vmcs_field_to_str(field), ret);
ERROR("VMWRITE error on %s!: %d\n", covirt_vmcs_field_to_str(field), ret);
return 1;
}
......@@ -205,50 +184,20 @@ check_vmcs_read(vmcs_field_t field,
ret = vmcs_read(field, val);
if (ret != VMX_SUCCESS) {
ERROR("VMREAD error on %s!: %d\n", vos_vmcs_field_to_str(field), ret);
ERROR("VMREAD error on %s!: %d\n", covirt_vmcs_field_to_str(field), ret);
}
return ret;
}
struct covirt_core_state;
int vos_is_vmx_capable( void );
int vos_init_vmx( void );
void vos_deinit_vmx_cpu(int cpu_id);
int vmx_launch( struct vos_vmm * vmm,
struct vos_vm * vm,
struct vos_core * core );
#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
#define VMX_VPID_EXTENT_ALL_CONTEXT 2
#define VMX_VPID_EXTENT_SINGLE_W_GLBLS_CONTEXT 3
int vmx_invvpid(int ext, int vpid, uintptr_t gva);
#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_SINGLE_CONTEXT 1
#define VMX_EPT_EXTENT_ALL_CONTEXT 2
int vmx_invept(int ext, u64 eptp);
/*
* NOTE: These functions should be used in place of the invept/invvpid functions above
*/
#if 0
int vmx_flush_addr(struct vos_core * core, uintptr_t addr);
int vmx_flush_core_tlb(struct vos_core * core);
#endif
int covirt_is_vmx_capable( void );
int covirt_is_vmx_enabled( void );
int covirt_setup_vmx_core( struct covirt_core_state * core_state );
int vmx_flush_tlb(void);
int covirt_init_vmx( void );
#endif
......
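The check_vmcs_write()/check_vmcs_read() wrappers above turn a raw VMWRITE/VMREAD failure into a logged error plus a failure return, so per-field VMCS setup reads naturally. A short illustrative fragment; it assumes a VMCS has been made current with vmcs_load(), and that VMCS_GUEST_RIP is among the field encodings in vmcs.h (which this diff truncates). The all-ones link pointer is the architectural "no shadow VMCS" value:
static int
vmcs_init_example(void)
{
    uintptr_t rip = 0;
    if (check_vmcs_write(VMCS_LINK_PTR, (uintptr_t)-1)) {
        ERROR("Failed to initialize VMCS link pointer\n");
        return -1;
    }
    if (check_vmcs_read(VMCS_GUEST_RIP, &rip) == VMX_SUCCESS) {
        DEBUG("Guest RIP = %p\n", (void *)rip);
    }
    return 0;
}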
......@@ -10,6 +10,8 @@
#include "vmx_hw_info.h"
#include "../pisces.h"
// Intel VMX Feature MSRs
/* These fields contain the hardware feature sets supported by the local CPU */
......@@ -17,7 +19,7 @@ static struct vmx_hw_info hw_info;
struct vmx_hw_info *
vos_get_vmx_hw_info(void)
covirt_get_vmx_hw_info(void)
{
return &(hw_info);
}
......@@ -32,11 +34,11 @@ get_ex_ctrl_caps(struct vmx_ctrl_field * field,
u32 true_0 = 0; /* Bit is 1 => MB1 */
u32 true_1 = 0; /* Bit is 0 => MBZ */
vos_get_msr(old_msr, &old_1, &old_0);
covirt_get_msr(old_msr, &old_1, &old_0);
field->def_val = old_0;
if (hw_info.basic_info.def1_maybe_0) {
vos_get_msr(true_msr, &true_1, &true_0);
covirt_get_msr(true_msr, &true_1, &true_0);
} else {
true_0 = old_0;
true_1 = old_1;
......@@ -56,7 +58,7 @@ get_ctrl_caps(struct vmx_ctrl_field * field,
u32 mbz = 0; /* (32-64) Bit is 0 => MBZ */
u32 mb1 = 0; /* (0-31) Bit is 1 => MB1 */
vos_get_msr(msr, &mbz, &mb1);
covirt_get_msr(msr, &mbz, &mb1);
field->def_val = mb1;
field->req_val = mb1;
......@@ -75,8 +77,8 @@ get_cr_fields(struct vmx_cr_field * field,
struct x86_msr mbz; /* Bit is 0 => MBZ */
struct x86_msr mb1; /* Bit is 1 => MB1 */
vos_get_msr(fixed_1_msr, &(mbz.hi), &(mbz.lo));
vos_get_msr(fixed_0_msr, &(mb1.hi), &(mb1.lo));
covirt_get_msr(fixed_1_msr, &(mbz.hi), &(mbz.lo));
covirt_get_msr(fixed_0_msr, &(mb1.hi), &(mb1.lo));
field->def_val = mb1.value;
field->req_val = mb1.value;
......@@ -89,14 +91,14 @@ get_cr_fields(struct vmx_cr_field * field,
int
vos_init_vmx_hw( void )
covirt_init_vmx_hw( void )
{
// extern vos_cpu_arch_t vos_cpu_types[];
memset(&hw_info, 0, sizeof(struct vmx_hw_info));
vos_get_msr(VMX_BASIC_MSR, &(hw_info.basic_info.hi), &(hw_info.basic_info.lo));
vos_get_msr(VMX_MISC_MSR, &(hw_info.misc_info.hi), &(hw_info.misc_info.lo));
covirt_get_msr(VMX_BASIC_MSR, &(hw_info.basic_info.hi), &(hw_info.basic_info.lo));
covirt_get_msr(VMX_MISC_MSR, &(hw_info.misc_info.hi), &(hw_info.misc_info.lo));
ERROR("BASIC_MSR: Lo: %x, Hi: %x\n", hw_info.basic_info.lo, hw_info.basic_info.hi);
......@@ -128,14 +130,14 @@ vos_init_vmx_hw( void )
// Grab the EPT info MSR if either EPT or VPIDs are available in Secondary proc ctrls
if ( ((hw_info.sec_proc_ctrls.req_mask & 0x00000002) == 0) ||
((hw_info.sec_proc_ctrls.req_val & 0x00000002) != 0) ) {
vos_lnx_printk("Intel VMX: EPT supported\n");
pisces_printk("Intel VMX: EPT supported\n");
hw_info.caps.ept = 1;
vos_get_msr(VMX_EPT_VPID_CAP_MSR, &(hw_info.ept_info.hi), &(hw_info.ept_info.lo));
covirt_get_msr(VMX_EPT_VPID_CAP_MSR, &(hw_info.ept_info.hi), &(hw_info.ept_info.lo));
} else if ( ((hw_info.sec_proc_ctrls.req_mask & 0x00000002) == 0) ||
((hw_info.sec_proc_ctrls.req_val & 0x00000002) != 0) ) {
vos_get_msr(VMX_EPT_VPID_CAP_MSR, &(hw_info.ept_info.hi), &(hw_info.ept_info.lo));
covirt_get_msr(VMX_EPT_VPID_CAP_MSR, &(hw_info.ept_info.hi), &(hw_info.ept_info.lo));
}
......@@ -144,7 +146,7 @@ vos_init_vmx_hw( void )
*/
if ( ((hw_info.sec_proc_ctrls.req_mask & 0x00000080) == 0) ||
((hw_info.sec_proc_ctrls.req_val & 0x00000080) != 0)) {
vos_lnx_printk("Intel VMX: Unrestricted Guest supported\n");
pisces_printk("Intel VMX: Unrestricted Guest supported\n");
hw_info.caps.unrestricted_guest = 1;
// Intel has a bug(?) in the CR0 fixed bits detection with UG support
......@@ -159,7 +161,7 @@ vos_init_vmx_hw( void )
*/
if ( ((hw_info.pin_ctrls.req_mask & 0x00000040) == 0) ||
((hw_info.pin_ctrls.req_val & 0x00000040) != 0)) {
vos_lnx_printk("Intel VMX: Preemption Timer supported\n");
pisces_printk("Intel VMX: Preemption Timer supported\n");
hw_info.caps.preempt_timer = 1;
}
......@@ -170,7 +172,7 @@ vos_init_vmx_hw( void )
*/
if ( ((hw_info.exit_ctrls.req_mask & 0x00040000) == 0) ||
((hw_info.exit_ctrls.req_val & 0x00040000) != 0)) {
vos_lnx_printk("Intel VMX: Virtualized PAT supported\n");
pisces_printk("Intel VMX: Virtualized PAT supported\n");
hw_info.caps.virt_pat = 1;
}
......@@ -179,7 +181,7 @@ vos_init_vmx_hw( void )
*/
if ( ((hw_info.exit_ctrls.req_mask & 0x00100000) == 0) ||
((hw_info.exit_ctrls.req_val & 0x00100000) != 0)) {
vos_lnx_printk("Intel VMX: Virtualized EFER supported\n");
pisces_printk("Intel VMX: Virtualized EFER supported\n");
hw_info.caps.virt_efer = 1;
}
......
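The capability test repeated throughout covirt_init_vmx_hw(), ((req_mask & bit) == 0) || ((req_val & bit) != 0), reads as "the control bit is either free to be set, or already pinned to 1". Assuming vmx_ctrl_field carries the allowed-0/allowed-1 information in req_mask/req_val as the code above implies, the test could be captured once as a helper (a sketch, not part of this change):
/* A control bit is usable if it is not pinned by the mask, or is pinned to 1 */
static inline int
vmx_ctrl_is_settable(struct vmx_ctrl_field * field,
                     u32                     bit)
{
    return (((field->req_mask & bit) == 0) ||
            ((field->req_val  & bit) != 0));
}
/* e.g.  if (vmx_ctrl_is_settable(&hw_info.sec_proc_ctrls, 0x00000002)) { ... EPT ... } */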
......@@ -9,8 +9,6 @@
#ifndef __VMX_HW_INFO_H__
#define __VMX_HW_INFO_H__
#include "vos.h"
#define VMX_BASIC_MSR 0x00000480
#define VMX_PINBASED_CTLS_MSR 0x00000481
#define VMX_PROCBASED_CTLS_MSR 0x00000482
......@@ -159,10 +157,10 @@ struct vmx_hw_info {
int vos_init_vmx_hw( void );
int covirt_init_vmx_hw( void );
struct vmx_hw_info *
vos_get_vmx_hw_info( void );
covirt_get_vmx_hw_info( void );
......
/*
* Copyright (c) 2014, Jack Lange <jacklange@cs.pitt.edu>
* All rights reserved.
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
#include <linux/version.h>
#include <asm/tlbflush.h>
#include "vmx_hw_info.h"
#include "vmx.h"
/* Opcode definitions for all the VM instructions */
#define VMCLEAR_OPCODE ".byte 0x66,0xf,0xc7;" /* reg=/6 */
#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
#define VMPTRLD_OPCODE ".byte 0x0f,0xc7;" /* reg=/6 */
#define VMPTRST_OPCODE ".byte 0x0f,0xc7;" /* reg=/7 */
#define VMREAD_OPCODE ".byte 0x0f,0x78;"
#define VMWRITE_OPCODE ".byte 0x0f,0x79;"
#define VMXOFF_OPCODE ".byte 0x0f,0x01,0xc4;"
#define VMXON_OPCODE ".byte 0xf3,0x0f,0xc7;" /* reg=/6 */
#define INVEPT_OPCODE ".byte 0x66,0x0f,0x38,0x80,0x08;"
#define INVVPID_OPCODE ".byte 0x66,0x0f,0x38,0x81,0x08;"
/* Mod/rm definitions for intel registers/memory */
#define EAX_ECX_MODRM ".byte 0xc1;"
// %eax with /6 reg
#define EAX_06_MODRM ".byte 0x30;"
// %eax with /7 reg
#define EAX_07_MODRM ".byte 0x38;"
// vmfail macro
#define CHECK_VMXFAIL(ret_valid, ret_invalid) \
if (ret_valid) { \
return VMX_FAIL_VALID; \
} else if (ret_invalid) { \
return VMX_FAIL_INVALID; \
}
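/*
 * Note on the error convention used below: VMX instructions signal failure
 * through RFLAGS.  CF=1 ("VMfailInvalid") means there was no current VMCS,
 * while ZF=1 ("VMfailValid") means an error code was stored in the
 * VM-instruction error field of the current VMCS.  The seteb/setnaeb
 * instructions capture those two flags into ret_valid/ret_invalid.
 */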
int
enable_vmx(void)
{
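    /*
     * VMXON requires CR4.VMXE (bit 13, 0x2000) to be set; on kernels >= 4.0
     * this goes through cr4_set_bits() so the per-CPU CR4 shadow stays in
     * sync.  CR0.NE (bit 5, 0x20) is one of the VMX fixed-1 bits, so it is
     * forced on as well before VMXON is attempted.
     */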
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
__asm__ __volatile__ (
"movq %%cr4, %%rbx;"
"orq $0x00002000, %%rbx;"
"movq %%rbx, %%cr4;"
:
:
: "%rbx"
);
#else
cr4_set_bits(X86_CR4_VMXE);
#endif
__asm__ __volatile__ (
"movq %%cr0, %%rbx; "
"orq $0x00000020,%%rbx; "
"movq %%rbx, %%cr0;"
:
:
: "%rbx"
);
return 0;
}
int
vmx_on(uintptr_t vmxon_ptr)
{
u64 vmxon_ptr_64 __attribute__((aligned(8))) = (u64)vmxon_ptr;
u8 ret_invalid = 0;
__asm__ __volatile__ (
VMXON_OPCODE
EAX_06_MODRM
"setnaeb %0;" // fail invalid (CF=1)
: "=q"(ret_invalid)
: "a"(&vmxon_ptr_64),"0"(ret_invalid)
: "memory");
if (ret_invalid) {
return VMX_FAIL_INVALID;
} else {
return VMX_SUCCESS;
}
}
int
vmx_off(void)
{
u8 ret_valid = 0;
u8 ret_invalid = 0;
__asm__ __volatile__ (
VMXOFF_OPCODE
"seteb %0;"
"setnaeb %1;"
: "=q"(ret_valid), "=q"(ret_invalid)
: "0"(ret_valid), "1"(ret_invalid)
: "memory");
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}
int
vmcs_clear(uintptr_t vmcs_ptr)
{
u64 vmcs_ptr_64 __attribute__ ((aligned(8))) = (u64)vmcs_ptr;
u8 ret_valid = 0;
u8 ret_invalid = 0;
__asm__ __volatile__ (
VMCLEAR_OPCODE
EAX_06_MODRM
"seteb %0;" // fail valid (ZF=1)
"setnaeb %1;" // fail invalid (CF=1)
: "=q"(ret_valid), "=q"(ret_invalid)
: "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
: "memory"
);
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}
int
vmcs_load(uintptr_t vmcs_ptr)
{
u64 vmcs_ptr_64 = (u64)vmcs_ptr;
u8 ret_valid = 0;
u8 ret_invalid = 0;
__asm__ __volatile__ (
VMPTRLD_OPCODE
EAX_06_MODRM
"seteb %0;" // fail valid (ZF=1)
"setnaeb %1;" // fail invalid (CF=1)
: "=q"(ret_valid), "=q"(ret_invalid)
: "a"(&vmcs_ptr_64), "0"(ret_valid), "1"(ret_invalid)
: "memory"
);
CHECK_VMXFAIL(ret_valid, ret_invalid);
return VMX_SUCCESS;
}