Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/atomic.h               |  20
-rw-r--r--   include/asm-generic/bug.h                  |  34
-rw-r--r--   include/asm-generic/dma-mapping-common.h   |  20
-rw-r--r--   include/asm-generic/fcntl.h                |   8
-rw-r--r--   include/asm-generic/gpio.h                 |  26
-rw-r--r--   include/asm-generic/io.h                   |   4
-rw-r--r--   include/asm-generic/kmap_types.h           |   6
-rw-r--r--   include/asm-generic/local64.h              |  96
-rw-r--r--   include/asm-generic/percpu.h               |  10
-rw-r--r--   include/asm-generic/scatterlist.h          |  17
-rw-r--r--   include/asm-generic/statfs.h               |   9
-rw-r--r--   include/asm-generic/topology.h             |  23
-rw-r--r--   include/asm-generic/unistd.h               |  33
-rw-r--r--   include/asm-generic/vmlinux.lds.h          |  61
14 files changed, 275 insertions, 92 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c33749f95b3..e53347fbf1d 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,8 +30,7 @@
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
@@ -40,8 +39,7 @@
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
@@ -53,18 +51,17 @@
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int temp;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
 	temp = v->counter;
 	temp += i;
 	v->counter = temp;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return temp;
 }
@@ -75,18 +72,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
  */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int temp;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
 	temp = v->counter;
 	temp -= i;
 	v->counter = temp;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return temp;
 }
@@ -139,9 +135,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long flags;
 
 	mask = ~mask;
-	local_irq_save(flags);
+	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
 	*addr &= mask;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 
 #define atomic_xchg(ptr, v)	(xchg(&(ptr)->counter, (v)))
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18c435d7c08..c2c9ba032d4 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -25,7 +25,10 @@ struct bug_entry {
 };
 #endif	/* __ASSEMBLY__ */
 
-#define BUGFLAG_WARNING	(1<<0)
+#define BUGFLAG_WARNING		(1 << 0)
+#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
+#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
+
 #endif	/* CONFIG_GENERIC_BUG */
 
 /*
@@ -56,17 +59,25 @@ struct bug_entry {
  * appear at runtime.  Use the versions with printk format strings
  * to provide better diagnostics.
  */
-#ifndef __WARN
+#ifndef __WARN_TAINT
 #ifndef __ASSEMBLY__
 extern void warn_slowpath_fmt(const char *file, const int line,
 		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+extern void warn_slowpath_fmt_taint(const char *file, const int line,
+				    unsigned taint, const char *fmt, ...)
+	__attribute__((format(printf, 4, 5)));
 extern void warn_slowpath_null(const char *file, const int line);
 #define WANT_WARN_ON_SLOWPATH
 #endif
 #define __WARN()		warn_slowpath_null(__FILE__, __LINE__)
 #define __WARN_printf(arg...)	warn_slowpath_fmt(__FILE__, __LINE__, arg)
+#define __WARN_printf_taint(taint, arg...)	\
+	warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
 #else
+#define __WARN()		__WARN_TAINT(TAINT_WARN)
 #define __WARN_printf(arg...)	do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf_taint(taint, arg...)	\
+	do { printk(arg); __WARN_TAINT(taint); } while (0)
 #endif
 
 #ifndef WARN_ON
@@ -87,6 +98,13 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 #endif
 
+#define WARN_TAINT(condition, taint, format...) ({	\
+	int __ret_warn_on = !!(condition);		\
+	if (unlikely(__ret_warn_on))			\
+		__WARN_printf_taint(taint, format);	\
+	unlikely(__ret_warn_on);			\
+})
+
 #else /* !CONFIG_BUG */
 #ifndef HAVE_ARCH_BUG
 #define BUG() do {} while(0)
@@ -110,6 +128,8 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 #endif
 
+#define WARN_TAINT(condition, taint, format...) WARN_ON(condition)
+
 #endif
 
 #define WARN_ON_ONCE(condition)	({			\
@@ -132,6 +152,16 @@ extern void warn_slowpath_null(const char *file, const int line);
 	unlikely(__ret_warn_once);			\
 })
 
+#define WARN_TAINT_ONCE(condition, taint, format...)	({	\
+	static bool __warned;					\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once))				\
+		if (WARN_TAINT(!__warned, taint, format))	\
+			__warned = true;			\
+	unlikely(__ret_warn_once);				\
+})
+
 #define WARN_ON_RATELIMIT(condition, state)			\
 		WARN_ON((condition) && __ratelimit(state))
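The WARN_TAINT() family above lets a warning site apply a specific taint flag instead of the generic TAINT_WARN that a plain WARN() now sets. A minimal sketch of a caller, assuming a hypothetical driver-side check (the firmware-revision variable and threshold are invented, and the example assumes a taint flag such as TAINT_FIRMWARE_WORKAROUND is available in the same tree):

    /* Hypothetical: taint with a firmware-specific flag rather than
     * TAINT_WARN, and only complain once per boot. */
    WARN_TAINT_ONCE(fw_rev < 0x0204, TAINT_FIRMWARE_WORKAROUND,
                    "firmware rev 0x%04x needs a workaround\n", fw_rev);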
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72..0c80bb38773 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu) {
-		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+	dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 						    size_t size,
 						    enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device) {
-		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
-		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-
-	} else
-		dma_sync_single_for_device(dev, addr + offset, size, dir);
+	dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index fcd268ce067..e3cbc38bdcc 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,6 +3,14 @@
 
 #include <linux/types.h>
 
+/*
+ * FMODE_EXEC is 0x20
+ * FMODE_NONOTIFY is 0x1000000
+ * These cannot be used by userspace O_* until internal and external open
+ * flags are split.
+ * -Eric Paris
+ */
+
 #define O_ACCMODE	00000003
 #define O_RDONLY	00000000
 #define O_WRONLY	00000001
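With the per-ops sync_single_range_* hooks gone, the range variants become thin wrappers that offset into the ordinary single-buffer sync path. Caller usage is unchanged; a hedged sketch, where the mapping, offset, and length names are made up:

    /* Give the CPU a coherent view of just the RX descriptor region
     * inside a larger streaming mapping, then return it to the device. */
    dma_sync_single_range_for_cpu(dev, ring_dma, desc_off, desc_len,
                                  DMA_FROM_DEVICE);
    /* ... CPU inspects/updates the descriptors here ... */
    dma_sync_single_range_for_device(dev, ring_dma, desc_off, desc_len,
                                     DMA_FROM_DEVICE);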
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f..c7376bf80b0 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -31,6 +31,7 @@ static inline int gpio_is_valid(int number)
 struct device;
 struct seq_file;
 struct module;
+struct device_node;
 
 /**
  * struct gpio_chip - abstract a GPIO controller
@@ -60,7 +61,9 @@ struct module;
  * @names: if set, must be an array of strings to use as alternative
  *      names for the GPIOs in this chip. Any entry in the array
  *      may be NULL if there is no alias for the GPIO, however the
- *      array must be @ngpio entries long.
+ *      array must be @ngpio entries long.  A name can include a single printk
+ *      format specifier for an unsigned int.  It is substituted by the actual
+ *      number of the gpio.
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
  * they can all be accessed through a common programing interface.
@@ -88,6 +91,9 @@ struct gpio_chip {
 						unsigned offset);
 	int			(*direction_output)(struct gpio_chip *chip,
 						unsigned offset, int value);
+	int			(*set_debounce)(struct gpio_chip *chip,
+						unsigned offset, unsigned debounce);
+
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
 
@@ -98,9 +104,20 @@ struct gpio_chip {
 					struct gpio_chip *chip);
 	int			base;
 	u16			ngpio;
-	char			**names;
+	const char		*const *names;
 	unsigned		can_sleep:1;
 	unsigned		exported:1;
+
+#if defined(CONFIG_OF_GPIO)
+	/*
+	 * If CONFIG_OF is enabled, then all GPIO controllers described in the
+	 * device tree automatically may have an OF translation
+	 */
+	struct device_node *of_node;
+	int of_gpio_n_cells;
+	int (*of_xlate)(struct gpio_chip *gc, struct device_node *np,
+			const void *gpio_spec, u32 *flags);
+#endif
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -110,6 +127,9 @@ extern int __must_check gpiochip_reserve(int start, int ngpio);
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
 extern int __must_check gpiochip_remove(struct gpio_chip *chip);
+extern struct gpio_chip *gpiochip_find(void *data,
+				       int (*match)(struct gpio_chip *chip,
+						    void *data));
 
 
 /* Always use the library code for GPIO management calls,
@@ -121,6 +141,8 @@ extern void gpio_free(unsigned gpio);
 extern int gpio_direction_input(unsigned gpio);
 extern int gpio_direction_output(unsigned gpio, int value);
 
+extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
+
 extern int gpio_get_value_cansleep(unsigned gpio);
 extern void gpio_set_value_cansleep(unsigned gpio, int value);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index bcee6365dca..118601fce92 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -188,11 +188,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr)		readb(addr)
 #define ioread16(addr)		readw(addr)
+#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
 #define ioread32(addr)		readl(addr)
+#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
 
 #define iowrite8(v, addr)	writeb((v), (addr))
 #define iowrite16(v, addr)	writew((v), (addr))
+#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
 #define iowrite32(v, addr)	writel((v), (addr))
+#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))
 
 #define ioread8_rep(p, dst, count) \
 	insb((unsigned long) (p), (dst), (count))
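gpio_set_debounce() exposes the new set_debounce chip hook to consumers, and the ioread*be/iowrite*be macros cover big-endian MMIO registers without open-coded byte swapping. A sketch under assumed names (the GPIO number, register offsets, and the debounce unit are illustrative only; chips without a set_debounce hook report an error, so callers should be prepared to fall back to software debouncing):

    /* Ask the hardware to debounce a button line; fall back if refused.
     * The value is assumed to be in microseconds here. */
    if (gpio_set_debounce(button_gpio, 5000) < 0)
            use_timer_debounce = true;

    /* Big-endian register access via the new accessors
     * (REG_STATUS, REG_CTRL, and IRQ_EN are hypothetical). */
    status = ioread32be(regs + REG_STATUS);
    iowrite32be(status | IRQ_EN, regs + REG_CTRL);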
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index e5f234a0854..0232ccb76f2 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -28,7 +28,11 @@ KMAP_D(15)	KM_UML_USERCOPY,
 KMAP_D(16)	KM_IRQ_PTE,
 KMAP_D(17)	KM_NMI,
 KMAP_D(18)	KM_NMI_PTE,
-KMAP_D(19)	KM_TYPE_NR
+KMAP_D(19)	KM_KDB,
+/*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
+KMAP_D(20)	KM_TYPE_NR
 };
 
 #undef KMAP_D
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 00000000000..02ac760c1a8
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t.  Which is
+ * rather pointless.  The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU.  local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
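local64_t targets counters that only their owning CPU updates; perf's event counts are the motivating user. On 64-bit architectures it collapses to local_t (often a single uninterruptible add), while the generic 32-bit fallback above uses atomic64_t. A minimal per-cpu sketch; the variable and helpers are invented, and the update side assumes it runs pinned to one CPU or with preemption disabled:

    #include <linux/percpu.h>
    #include <asm/local64.h>

    static DEFINE_PER_CPU(local64_t, nr_events);

    static void count_event(void)          /* fast path, owning CPU only */
    {
            local64_inc(this_cpu_ptr(&nr_events));
    }

    static u64 sum_events(void)            /* slow path, any CPU */
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += local64_read(&per_cpu(nr_events, cpu));
            return sum;
    }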
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7..b5043a9890d 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
 
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
 #define PER_CPU_BASE_SECTION ".data"
 #endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION ""
 #else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7..5de07355fad 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist {
 	unsigned int	offset;
 	unsigned int	length;
 	dma_addr_t	dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 	unsigned int	dma_length;
+#endif
 };
 
 /*
@@ -22,22 +24,11 @@ struct scatterlist {
  * is 0.
  */
 #define sg_dma_address(sg)	((sg)->dma_address)
-#ifndef sg_dma_len
-/*
- * Normally, you have an iommu on 64 bit machines, but not on 32 bit
- * machines. Architectures that are differnt should override this.
- */
-#if __BITS_PER_LONG == 64
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
 #define sg_dma_len(sg)		((sg)->dma_length)
 #else
 #define sg_dma_len(sg)		((sg)->length)
-#endif /* 64 bit */
-#endif /* sg_dma_len */
-
-#ifndef ISA_DMA_THRESHOLD
-#define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
-#define ARCH_HAS_SG_CHAIN
-
 #endif /* __ASM_GENERIC_SCATTERLIST_H */
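Moving dma_length behind CONFIG_NEED_SG_DMA_LENGTH shrinks struct scatterlist on architectures that never merge entries, but the accessor contract is unchanged: after dma_map_sg(), walk the returned count and use only the dma_* accessors, since an IOMMU may have coalesced entries. A generic sketch (program_hw_slot is a hypothetical device-specific helper):

    struct scatterlist *sg;
    int i, count;

    /* dma_map_sg() may return fewer entries than it was given. */
    count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    for_each_sg(sgl, sg, count, i)
            program_hw_slot(i, sg_dma_address(sg), sg_dma_len(sg));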
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 3b4fb3e52f0..0fd28e028de 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -33,7 +33,8 @@ struct statfs {
 	__kernel_fsid_t f_fsid;
 	__statfs_word f_namelen;
 	__statfs_word f_frsize;
-	__statfs_word f_spare[5];
+	__statfs_word f_flags;
+	__statfs_word f_spare[4];
 };
 
 /*
@@ -55,7 +56,8 @@ struct statfs64 {
 	__kernel_fsid_t f_fsid;
 	__statfs_word f_namelen;
 	__statfs_word f_frsize;
-	__statfs_word f_spare[5];
+	__statfs_word f_flags;
+	__statfs_word f_spare[4];
 } ARCH_PACK_STATFS64;
 
 /*
@@ -77,7 +79,8 @@ struct compat_statfs64 {
 	__kernel_fsid_t f_fsid;
 	__u32 f_namelen;
 	__u32 f_frsize;
-	__u32 f_spare[5];
+	__u32 f_flags;
+	__u32 f_spare[4];
 } ARCH_PACK_COMPAT_STATFS64;
 
 #endif
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d..fc824e2828f 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2002, IBM Corp.
  *
- * All rights reserved.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,6 +34,16 @@
 #ifndef cpu_to_node
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #endif
+#ifndef set_numa_node
+#define set_numa_node(node)
+#endif
+#ifndef set_cpu_numa_node
+#define set_cpu_numa_node(cpu, node)
+#endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu)		((void)(cpu),0)
+#endif
+
 #ifndef parent_node
 #define parent_node(node)	((void)(node),0)
 #endif
@@ -52,4 +62,15 @@
 
 #endif	/* CONFIG_NUMA */
 
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
+
+#ifndef set_numa_mem
+#define set_numa_mem(node)
+#endif
+#ifndef set_cpu_numa_mem
+#define set_cpu_numa_mem(cpu, node)
+#endif
+
+#endif	/* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */
+
 #endif /* _ASM_GENERIC_TOPOLOGY_H */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 6a0b30f78a6..c17cebc4995 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -18,7 +18,7 @@
 #define __SYSCALL(x, y)
 #endif
 
-#if __BITS_PER_LONG == 32
+#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
 #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
 #else
 #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
@@ -241,8 +241,13 @@ __SYSCALL(__NR_sync, sys_sync)
 __SYSCALL(__NR_fsync, sys_fsync)
 #define __NR_fdatasync 83
 __SYSCALL(__NR_fdatasync, sys_fdatasync)
+#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
+#define __NR_sync_file_range2 84
+__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2)
+#else
 #define __NR_sync_file_range 84
-__SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+#endif
 
 /* fs/timerfd.c */
 #define __NR_timerfd_create 85
@@ -580,7 +585,7 @@ __SYSCALL(__NR_execve, sys_execve)	/* .long sys_execve_wrapper */
 __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
 /* mm/fadvise.c */
 #define __NR3264_fadvise64 223
-__SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64)
+__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64)
 
 /* mm/, CONFIG_MMU only */
 #ifndef __ARCH_NOMMU
@@ -627,8 +632,17 @@ __SYSCALL(__NR_accept4, sys_accept4)
 #define __NR_recvmmsg 243
 __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 
+/*
+ * Architectures may provide up to 16 syscalls of their own
+ * starting with this value.
+ */
+#define __NR_arch_specific_syscall 244
+
+#define __NR_wait4 260
+__SYSCALL(__NR_wait4, sys_wait4)
+
 #undef __NR_syscalls
-#define __NR_syscalls 244
+#define __NR_syscalls 261
 
 /*
  * All syscalls below here should go away really,
@@ -694,7 +708,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd)
 #define __NR_syscalls (__NR_signalfd+1)
 #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */
 
-#if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T)
+#if (__BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)) && \
+	defined(__ARCH_WANT_SYSCALL_OFF_T)
 #define __NR_sendfile 1046
 __SYSCALL(__NR_sendfile, sys_sendfile)
 #define __NR_ftruncate 1047
@@ -740,6 +755,7 @@ __SYSCALL(__NR_getpgrp, sys_getpgrp)
 __SYSCALL(__NR_pause, sys_pause)
 #define __NR_time 1062
 #define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_TIME
 __SYSCALL(__NR_time, sys_time)
 #define __NR_utime 1063
 #define __ARCH_WANT_SYS_UTIME
@@ -763,8 +779,8 @@ __SYSCALL(__NR_epoll_wait, sys_epoll_wait)
 __SYSCALL(__NR_ustat, sys_ustat)
 #define __NR_vfork 1071
 __SYSCALL(__NR_vfork, sys_vfork)
-#define __NR_wait4 1072
-__SYSCALL(__NR_wait4, sys_wait4)
+#define __NR_oldwait4 1072
+__SYSCALL(__NR_oldwait4, sys_wait4)
 #define __NR_recv 1073
 __SYSCALL(__NR_recv, sys_recv)
 #define __NR_send 1074
@@ -801,7 +817,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
  * Here we map the numbers so that both versions
  * use the same syscall table layout.
  */
-#if __BITS_PER_LONG == 64
+#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
 #define __NR_fcntl __NR3264_fcntl
 #define __NR_statfs __NR3264_statfs
 #define __NR_fstatfs __NR3264_fstatfs
@@ -848,6 +864,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
 #endif
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 
 /*
  * "Conditional" syscalls
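The reserved block starting at __NR_arch_specific_syscall gives each architecture 16 private syscall slots without colliding with future generic numbers. A hypothetical architecture header might use it like this (the architecture and syscall names are invented):

    /* arch/<myarch>/include/asm/unistd.h, hypothetical */
    #define __NR_myarch_cacheflush  (__NR_arch_specific_syscall + 0)
    __SYSCALL(__NR_myarch_cacheflush, sys_myarch_cacheflush)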
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0..8a92a170fb7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -63,6 +63,12 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
+/*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+ */
+#define STRUCT_ALIGN() . = ALIGN(32)
+
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
  * often happens at runtime)
@@ -150,10 +156,6 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___markers) = .;				\
-	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;				\
 	. = ALIGN(32);							\
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
@@ -166,7 +168,11 @@
 	LIKELY_PROFILE()						\
 	BRANCH_PROFILE()						\
 	TRACE_PRINTKS()							\
+									\
+	STRUCT_ALIGN();							\
 	FTRACE_EVENTS()							\
+									\
+	STRUCT_ALIGN();							\
 	TRACE_SYSCALLS()
 
 /*
@@ -175,25 +181,25 @@
  * Data section helpers
  */
 #define NOSAVE_DATA							\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_begin) = .;				\
-	*(.data.nosave)							\
+	*(.data..nosave)						\
 	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__nosave_end) = .;
 
 #define PAGE_ALIGNED_DATA(page_align)					\
 	. = ALIGN(page_align);						\
-	*(.data.page_aligned)
+	*(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data.read_mostly)
+	*(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data.init_task)
+	*(.data..init_task)
 
 /*
  * Read only Data
 */
@@ -247,10 +253,10 @@
 	}								\
 									\
 	/* RapidIO route ops */						\
-	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) {		\
-		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
-		*(.rio_route_ops)					\
-		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
+	.rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
+		*(.rio_switch_ops)					\
+		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
 	}								\
 									\
 	TRACEDATA
 
@@ -435,7 +441,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align)					\
 	. = ALIGN(align);						\
-	.data.init_task : {						\
+	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
 		INIT_TASK_DATA(align)					\
 	}
 
@@ -499,7 +505,7 @@
 #define BSS(bss_align)							\
 	. = ALIGN(bss_align);						\
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
-		*(.bss.page_aligned)					\
+		*(.bss..page_aligned)					\
 		*(.dynbss)						\
 		*(.bss)							\
 		*(COMMON)						\
@@ -643,6 +649,7 @@
 	EXIT_DATA							\
 	EXIT_CALL							\
 	*(.discard)							\
+	*(.discard.*)							\
 	}
 
 /**
@@ -666,16 +673,16 @@
  */
 #define PERCPU_VADDR(vaddr, phdr)					\
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
-	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
+	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
 				- LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data.percpu.first)					\
-		*(.data.percpu.page_aligned)				\
-		*(.data.percpu)						\
-		*(.data.percpu.shared_aligned)				\
+		*(.data..percpu..first)					\
+		*(.data..percpu..page_aligned)				\
+		*(.data..percpu)					\
+		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	} phdr								\
-	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU - define output section for percpu area, simple version
@@ -687,18 +694,18 @@
  *
  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  * that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
  * configuration.
  */
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
-		*(.data.percpu.first)					\
-		*(.data.percpu.page_aligned)				\
-		*(.data.percpu)						\
-		*(.data.percpu.shared_aligned)				\
+		*(.data..percpu..first)					\
+		*(.data..percpu..page_aligned)				\
+		*(.data..percpu)					\
+		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	}
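The .data.percpu to .data..percpu renames (together with the percpu.h changes above) exist so that sections the kernel names explicitly cannot collide with the .data.* sections gcc emits under -ffunction-sections/-fdata-sections. The section a per-cpu variable lands in is just the concatenation of the PER_CPU_* strings; roughly, under the SMP definitions above (my_counter and my_stats are illustrative names only):

    /* expands to __attribute__((section(".data..percpu"))), which the
     * *(.data..percpu) input pattern above collects */
    DEFINE_PER_CPU(int, my_counter);

    /* expands to section(".data..percpu..shared_aligned"), matched by
     * *(.data..percpu..shared_aligned) */
    DEFINE_PER_CPU_SHARED_ALIGNED(struct my_stats, my_stats);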