author     Matias Zabaljauregui <zabaljauregui@gmail.com>  2009-03-18 13:38:35 -0300
committer  Rusty Russell <rusty@rustcorp.com.au>           2009-03-30 21:55:25 +1030
commit     df1693abc42e34bbc4351e179dbe66c28a94efb8 (patch)
tree       b0cec44a3ace1fbc8377c73428daf64848b48907
parent     4cd8b5e2a159f18a1507f1187b44a1acbfa6341b (diff)
lguest: use bool instead of int
Impact: clean up

Rusty told me, some time ago, that he had become a fan of "bool".
So, here are some replacements.

Signed-off-by: Matias Zabaljauregui <zabaljauregui at gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  drivers/lguest/core.c                  |  4
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  | 21
-rw-r--r--  drivers/lguest/lg.h                    |  8
-rw-r--r--  drivers/lguest/page_tables.c           | 18
-rw-r--r--  drivers/lguest/segments.c              |  2
5 files changed, 27 insertions(+), 26 deletions(-)
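For illustration only (this snippet is not part of the patch), here is a minimal sketch of the pattern being applied: a predicate helper that previously returned int 0/1 now returns bool and uses true/false, so the yes/no contract is visible in the signature itself. The helper name range_ok and the 4096 page size are made-up stand-ins, not code from lguest.

#include <stdbool.h>

/* Hypothetical before/after: this would formerly have been
 * "static int range_ok(...)" returning 0 or 1. */
static bool range_ok(unsigned long addr, unsigned long len,
                     unsigned long pfn_limit)
{
        /* Reject addr + len overflow first, then check the page-frame
         * limit, mirroring the shape of lguest_address_ok() in the
         * diff below (4096 stands in for PAGE_SIZE). */
        if (addr + len < addr)
                return false;
        return (addr + len) / 4096 < pfn_limit;
}

One reason the conversion is this mechanical: in C99, any non-zero value converted to bool becomes true, so predicates such as idt_present() can keep returning a masked expression like (hi & 0x8000) unchanged.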
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 60156dfdc60..4845fb3cf74 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -152,8 +152,8 @@ static void unmap_switcher(void)
* code. We have to check that the range is below the pfn_limit the Launcher
* gave us. We have to make sure that addr + len doesn't give us a false
* positive by overflowing, too. */
-int lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len)
+bool lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len)
{
return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
}
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 504091da173..6e99adbe194 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -34,7 +34,7 @@ static int idt_type(u32 lo, u32 hi)
}
/* An IDT entry can't be used unless the "present" bit is set. */
-static int idt_present(u32 lo, u32 hi)
+static bool idt_present(u32 lo, u32 hi)
{
return (hi & 0x8000);
}
@@ -60,7 +60,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
* We set up the stack just like the CPU does for a real interrupt, so it's
* identical for the Guest (and the standard "iret" instruction will undo
* it). */
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
+static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
+ bool has_err)
{
unsigned long gstack, origstack;
u32 eflags, ss, irq_enable;
@@ -184,7 +185,7 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
/* set_guest_interrupt() takes the interrupt descriptor and a
* flag to say whether this interrupt pushes an error code onto
* the stack as well: virtual interrupts never do. */
- set_guest_interrupt(cpu, idt->a, idt->b, 0);
+ set_guest_interrupt(cpu, idt->a, idt->b, false);
}
/* Every time we deliver an interrupt, we update the timestamp in the
@@ -244,26 +245,26 @@ void free_interrupts(void)
/*H:220 Now we've got the routines to deliver interrupts, delivering traps like
* page fault is easy. The only trick is that Intel decided that some traps
* should have error codes: */
-static int has_err(unsigned int trap)
+static bool has_err(unsigned int trap)
{
return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}
/* deliver_trap() returns true if it could deliver the trap. */
-int deliver_trap(struct lg_cpu *cpu, unsigned int num)
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
{
/* Trap numbers are always 8 bit, but we set an impossible trap number
* for traps inside the Switcher, so check that here. */
if (num >= ARRAY_SIZE(cpu->arch.idt))
- return 0;
+ return false;
/* Early on the Guest hasn't set the IDT entries (or maybe it put a
* bogus one in): if we fail here, the Guest will be killed. */
if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
- return 0;
+ return false;
set_guest_interrupt(cpu, cpu->arch.idt[num].a,
cpu->arch.idt[num].b, has_err(num));
- return 1;
+ return true;
}
/*H:250 Here's the hard part: returning to the Host every time a trap happens
@@ -279,12 +280,12 @@ int deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly. */
-static int direct_trap(unsigned int num)
+static bool direct_trap(unsigned int num)
{
/* Hardware interrupts don't go to the Guest at all (except system
* call). */
if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
- return 0;
+ return false;
/* The Host needs to see page faults (for shadow paging and to save the
* fault address), general protection faults (in/out emulation) and
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index f2c641e0bdd..ac8a4a3741b 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -109,8 +109,8 @@ struct lguest
extern struct mutex lguest_lock;
/* core.c: */
-int lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len);
+bool lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len);
void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
@@ -140,7 +140,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
/* interrupts_and_traps.c: */
void maybe_do_interrupt(struct lg_cpu *cpu);
-int deliver_trap(struct lg_cpu *cpu, unsigned int num);
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
u32 low, u32 hi);
void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
@@ -173,7 +173,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu);
void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
unsigned long vaddr, pte_t val);
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
-int demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
+bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
void page_table_guest_data_init(struct lg_cpu *cpu);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 82ff484bd8c..a059cf9980f 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -199,7 +199,7 @@ static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
*
* If we fixed up the fault (ie. we mapped the address), this routine returns
* true. Otherwise, it was a real fault and we need to tell the Guest. */
-int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
pgd_t gpgd;
pgd_t *spgd;
@@ -211,7 +211,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- return 0;
+ return false;
/* Now look at the matching shadow entry. */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
@@ -222,7 +222,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* simple for this corner case. */
if (!ptepage) {
kill_guest(cpu, "out of memory allocating pte page");
- return 0;
+ return false;
}
/* We check that the Guest pgd is OK. */
check_gpgd(cpu, gpgd);
@@ -238,16 +238,16 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
/* If this page isn't in the Guest page tables, we can't page it in. */
if (!(pte_flags(gpte) & _PAGE_PRESENT))
- return 0;
+ return false;
/* Check they're not trying to write to a page the Guest wants
* read-only (bit 2 of errcode == write). */
if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
- return 0;
+ return false;
/* User access to a kernel-only page? (bit 3 == user access) */
if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
- return 0;
+ return false;
/* Check that the Guest PTE flags are OK, and the page number is below
* the pfn_limit (ie. not mapping the Launcher binary). */
@@ -283,7 +283,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* manipulated, the result returned and the code complete. A small
* delay and a trace of alliteration are the only indications the Guest
* has that a page fault occurred at all. */
- return 1;
+ return true;
}
/*H:360
@@ -296,7 +296,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
*
* This is a quick version which answers the question: is this virtual address
* mapped by the shadow page tables, and is it writable? */
-static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
+static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
pgd_t *spgd;
unsigned long flags;
@@ -304,7 +304,7 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
/* Look at the current top level entry: is it present? */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
- return 0;
+ return false;
/* Check the flags on the pte entry itself: it must be present and
* writable. */
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index ec6aa3f1c36..4f15439b7f1 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -45,7 +45,7 @@
* "Task State Segment" which controls all kinds of delicate things. The
* LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
* the Guest can't be trusted to deal with double faults. */
-static int ignored_gdt(unsigned int num)
+static bool ignored_gdt(unsigned int num)
{
return (num == GDT_ENTRY_TSS
|| num == GDT_ENTRY_LGUEST_CS