
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull arch/tile updates from Chris Metcalf:
 "The only substantive pieces in this batch are some more vDSO support,
  and removing the reference to &platform_bus in tile-srom.c.

  The rest are minor issues reported to me"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: add clock_gettime support to vDSO
  tile: switch to using seqlocks for the vDSO time code
  tile gxio: use better string copy primitive
  char: tile-srom: Add real platform bus parent
  Removed repeated word in comments
  tilegx: Enable ARCH_SUPPORTS_ATOMIC_RMW
  tile: Remove tile-specific _sinitdata and _einitdata
  tile: use ARRAY_SIZE
Linus Torvalds committed on 2014-10-08 05:36:23 -04:00
commit f8e4fae2e7
12 changed files with 248 additions and 97 deletions


@@ -134,6 +134,7 @@ config TILEGX
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_ARCH_KGDB
select ARCH_SUPPORTS_ATOMIC_RMW
config TILEPRO
def_bool !TILEGX


@@ -29,6 +29,32 @@
/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
/**
* strscpy - Copy a C-string into a sized buffer, but only if it fits
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
*
* Use this routine to avoid copying too-long strings.
* The routine returns the total number of bytes copied
* (including the trailing NUL) or zero if the buffer wasn't
* big enough. To ensure that programmers pay attention
* to the return code, the destination has a single NUL
* written at the front (if size is non-zero) when the
* buffer is not big enough.
*/
static size_t strscpy(char *dest, const char *src, size_t size)
{
size_t len = strnlen(src, size) + 1;
if (len > size) {
if (size)
dest[0] = '\0';
return 0;
}
memcpy(dest, src, len);
return len;
}
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
char file[32];
@@ -511,8 +537,8 @@ int gxio_mpipe_link_instance(const char *link_name)
if (!context)
return GXIO_ERR_NO_DEVICE;
strncpy(name.name, link_name, sizeof(name.name));
name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
return GXIO_ERR_NO_DEVICE;
return gxio_mpipe_info_instance_aux(context, name);
}
@@ -529,7 +555,8 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
if (rv >= 0) {
strncpy(link_name, name.name, sizeof(name.name));
if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
return GXIO_ERR_INVAL_MEMORY_SIZE;
memcpy(link_mac, mac.mac, sizeof(mac.mac));
}
@@ -545,8 +572,8 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
_gxio_mpipe_link_name_t name;
int rv;
strncpy(name.name, link_name, sizeof(name.name));
name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
return GXIO_ERR_NO_DEVICE;
rv = gxio_mpipe_link_open_aux(context, name, flags);
if (rv < 0)

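For context: the strscpy() above is a file-local helper, not the generic routine of the same name that was later added to lib/string.c (which reports truncation differently). A small user-space sketch of why the call sites now check the return value; the buffer size and link names are made up:

/* Illustrative only (user space): the local strscpy() above vs. strncpy(),
 * which neither reports truncation nor guarantees NUL termination. */
#include <stdio.h>
#include <string.h>

static size_t strscpy(char *dest, const char *src, size_t size)
{
	size_t len = strnlen(src, size) + 1;

	if (len > size) {
		if (size)
			dest[0] = '\0';	/* force callers to notice */
		return 0;
	}
	memcpy(dest, src, len);
	return len;
}

int main(void)
{
	char name[8];

	strncpy(name, "xgbe_0123456789", sizeof(name));	/* silently truncated, no NUL */
	if (strscpy(name, "xgbe_0123456789", sizeof(name)) == 0)
		puts("link name too long");	/* taken; name[0] == '\0' */
	return 0;
}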

@@ -19,9 +19,6 @@
#include <asm-generic/sections.h>
/* Text and data are at different areas in the kernel VA space. */
extern char _sinitdata[], _einitdata[];
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];


@@ -15,6 +15,7 @@
#ifndef __TILE_VDSO_H__
#define __TILE_VDSO_H__
#include <linux/seqlock.h>
#include <linux/types.h>
/*
@@ -26,15 +27,20 @@
*/
struct vdso_data {
__u64 tz_update_count; /* Timezone atomicity ctr */
__u64 tb_update_count; /* Timebase atomicity ctr */
__u64 xtime_tod_stamp; /* TOD clock for xtime */
__u64 xtime_clock_sec; /* Kernel time second */
__u64 xtime_clock_nsec; /* Kernel time nanosecond */
__u64 wtom_clock_sec; /* Wall to monotonic clock second */
__u64 wtom_clock_nsec; /* Wall to monotonic clock nanosecond */
seqcount_t tz_seq; /* Timezone seqlock */
seqcount_t tb_seq; /* Timebase seqlock */
__u64 cycle_last; /* TOD clock for xtime */
__u64 mask; /* Cycle mask */
__u32 mult; /* Cycle to nanosecond multiplier */
__u32 shift; /* Cycle to nanosecond divisor (power of two) */
__u64 wall_time_sec;
__u64 wall_time_snsec;
__u64 monotonic_time_sec;
__u64 monotonic_time_snsec;
__u64 wall_time_coarse_sec;
__u64 wall_time_coarse_nsec;
__u64 monotonic_time_coarse_sec;
__u64 monotonic_time_coarse_nsec;
__u32 tz_minuteswest; /* Minutes west of Greenwich */
__u32 tz_dsttime; /* Type of dst correction */
};

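The *_snsec fields mirror tk->tkr.xtime_nsec: they hold nanoseconds pre-scaled by 2^shift, so a reader can add the raw (cycles * mult) product and defer the right shift to the very end. A rough sketch of the conversion, using the field names above (the helper name is made up):

/* Illustrative: combine the fixed-point snsec value with the cycles elapsed
 * since cycle_last to get plain nanoseconds, as the vDSO readers below do. */
static inline u64 snsec_to_ns(const struct vdso_data *v, u64 now_cycles)
{
	u64 snsec = v->wall_time_snsec;		/* ns << shift */

	snsec += ((now_cycles - v->cycle_last) & v->mask) * v->mult;
	return snsec >> v->shift;		/* may still exceed NSEC_PER_SEC */
}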

@@ -360,19 +360,19 @@
* @{
*/
/** Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
#define SIM_CHIP_MEMCTL 0x001
/** Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
#define SIM_CHIP_XAUI 0x002
/** Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
#define SIM_CHIP_PCIE 0x004
/** Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
#define SIM_CHIP_MPIPE 0x008
/** Use with with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
#define SIM_CHIP_TRIO 0x010
/** Reference all chip devices. */


@@ -249,33 +249,52 @@ cycles_t ns2cycles(unsigned long nsecs)
void update_vsyscall_tz(void)
{
/* Userspace gettimeofday will spin while this value is odd. */
++vdso_data->tz_update_count;
smp_wmb();
write_seqcount_begin(&vdso_data->tz_seq);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
smp_wmb();
++vdso_data->tz_update_count;
write_seqcount_end(&vdso_data->tz_seq);
}
void update_vsyscall(struct timekeeper *tk)
{
struct timespec *wtm = &tk->wall_to_monotonic;
struct clocksource *clock = tk->tkr.clock;
if (clock != &cycle_counter_cs)
if (tk->tkr.clock != &cycle_counter_cs)
return;
/* Userspace gettimeofday will spin while this value is odd. */
++vdso_data->tb_update_count;
smp_wmb();
vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
vdso_data->wtom_clock_sec = wtm->tv_sec;
vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->mult = tk->tkr.mult;
vdso_data->shift = tk->tkr.shift;
smp_wmb();
++vdso_data->tb_update_count;
write_seqcount_begin(&vdso_data->tb_seq);
vdso_data->cycle_last = tk->tkr.cycle_last;
vdso_data->mask = tk->tkr.mask;
vdso_data->mult = tk->tkr.mult;
vdso_data->shift = tk->tkr.shift;
vdso_data->wall_time_sec = tk->xtime_sec;
vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
vdso_data->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+ ((u64)tk->wall_to_monotonic.tv_nsec
<< tk->tkr.shift);
while (vdso_data->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
vdso_data->monotonic_time_snsec -=
((u64)NSEC_PER_SEC) << tk->tkr.shift;
vdso_data->monotonic_time_sec++;
}
vdso_data->wall_time_coarse_sec = tk->xtime_sec;
vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
tk->tkr.shift);
vdso_data->monotonic_time_coarse_sec =
vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->monotonic_time_coarse_nsec =
vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
vdso_data->monotonic_time_coarse_sec++;
}
write_seqcount_end(&vdso_data->tb_seq);
}

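The open-coded odd/even counters and explicit smp_wmb() calls can go because the seqcount helpers already provide the needed ordering; the writer above pairs with lockless readers in the vDSO further down. Roughly, the contract looks like this (a sketch, not verbatim kernel code):

/* Writer side (kernel, cf. update_vsyscall above): */
write_seqcount_begin(&vdso_data->tb_seq);	/* sequence goes odd */
/* ... store the new timekeeping values ... */
write_seqcount_end(&vdso_data->tb_seq);		/* sequence goes even again */

/* Reader side (vDSO, cf. do_realtime below): */
unsigned seq;
do {
	seq = read_seqcount_begin(&vdso_data->tb_seq);	/* spins while odd */
	/* ... copy the values into locals ... */
} while (read_seqcount_retry(&vdso_data->tb_seq, seq));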

@@ -277,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
return;
if (fault_num >= 0 &&
fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
fault_num < ARRAY_SIZE(int_name) &&
int_name[fault_num] != NULL)
name = int_name[fault_num];
else

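Besides being shorter, ARRAY_SIZE() is safer: the kernel's definition (roughly reproduced below) refuses to compile if its argument is a pointer rather than an actual array.

/* Simplified from include/linux/kernel.h; __must_be_array() is 0 for real
 * arrays and breaks the build when handed a pointer. */
#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))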

@@ -82,6 +82,8 @@ VERSION
__vdso_rt_sigreturn;
__vdso_gettimeofday;
gettimeofday;
__vdso_clock_gettime;
clock_gettime;
local:*;
};
}

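Exporting __vdso_clock_gettime (plus the weak clock_gettime alias) lets the C library satisfy most clock_gettime() calls without entering the kernel. A generic user-space sketch of the fast path being exercised; nothing in it is tile-specific:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* With the symbol above exported, libc may dispatch this to the vDSO. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}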

@@ -15,6 +15,7 @@
#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
#include <linux/time.h>
#include <asm/timex.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
#define get_cycles get_cycles_inline
#endif
struct syscall_return_value {
long value;
long error;
};
/*
* Find out the vDSO data page address in the process address space.
*/
@@ -50,59 +56,143 @@ inline unsigned long get_datapage(void)
return ret;
}
int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
static inline u64 vgetsns(struct vdso_data *vdso)
{
cycles_t cycles;
unsigned long count, sec, ns;
volatile struct vdso_data *vdso_data;
return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
}
vdso_data = (struct vdso_data *)get_datapage();
/* The use of the timezone is obsolete, normally tz is NULL. */
if (unlikely(tz != NULL)) {
while (1) {
/* Spin until the update finish. */
count = vdso_data->tz_update_count;
if (count & 1)
continue;
static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
{
unsigned count;
u64 ns;
tz->tz_minuteswest = vdso_data->tz_minuteswest;
tz->tz_dsttime = vdso_data->tz_dsttime;
do {
count = read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->wall_time_sec;
ns = vdso->wall_time_snsec;
ns += vgetsns(vdso);
ns >>= vdso->shift;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
/* Check whether updated, read again if so. */
if (count == vdso_data->tz_update_count)
break;
}
}
if (unlikely(tv == NULL))
return 0;
while (1) {
/* Spin until the update finish. */
count = vdso_data->tb_update_count;
if (count & 1)
continue;
sec = vdso_data->xtime_clock_sec;
cycles = get_cycles() - vdso_data->xtime_tod_stamp;
ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
ns >>= vdso_data->shift;
if (ns >= NSEC_PER_SEC) {
ns -= NSEC_PER_SEC;
sec += 1;
}
/* Check whether updated, read again if so. */
if (count == vdso_data->tb_update_count)
break;
}
tv->tv_sec = sec;
tv->tv_usec = ns / 1000;
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
}
static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
{
unsigned count;
u64 ns;
do {
count = read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->monotonic_time_sec;
ns = vdso->monotonic_time_snsec;
ns += vgetsns(vdso);
ns >>= vdso->shift;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
}
static inline int do_realtime_coarse(struct vdso_data *vdso,
struct timespec *ts)
{
unsigned count;
do {
count = read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->wall_time_coarse_sec;
ts->tv_nsec = vdso->wall_time_coarse_nsec;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
return 0;
}
static inline int do_monotonic_coarse(struct vdso_data *vdso,
struct timespec *ts)
{
unsigned count;
do {
count = read_seqcount_begin(&vdso->tb_seq);
ts->tv_sec = vdso->monotonic_time_coarse_sec;
ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
return 0;
}
struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
struct timezone *tz)
{
struct syscall_return_value ret = { 0, 0 };
unsigned count;
struct vdso_data *vdso = (struct vdso_data *)get_datapage();
/* The use of the timezone is obsolete, normally tz is NULL. */
if (unlikely(tz != NULL)) {
do {
count = read_seqcount_begin(&vdso->tz_seq);
tz->tz_minuteswest = vdso->tz_minuteswest;
tz->tz_dsttime = vdso->tz_dsttime;
} while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
}
if (unlikely(tv == NULL))
return ret;
do_realtime(vdso, (struct timespec *)tv);
tv->tv_usec /= 1000;
return ret;
}
int gettimeofday(struct timeval *tv, struct timezone *tz)
__attribute__((weak, alias("__vdso_gettimeofday")));
static struct syscall_return_value vdso_fallback_gettime(long clock,
struct timespec *ts)
{
struct syscall_return_value ret;
__asm__ __volatile__ (
"swint1"
: "=R00" (ret.value), "=R01" (ret.error)
: "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
: "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "memory");
return ret;
}
struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
struct timespec *ts)
{
struct vdso_data *vdso = (struct vdso_data *)get_datapage();
struct syscall_return_value ret = { 0, 0 };
switch (clock) {
case CLOCK_REALTIME:
do_realtime(vdso, ts);
return ret;
case CLOCK_MONOTONIC:
do_monotonic(vdso, ts);
return ret;
case CLOCK_REALTIME_COARSE:
do_realtime_coarse(vdso, ts);
return ret;
case CLOCK_MONOTONIC_COARSE:
do_monotonic_coarse(vdso, ts);
return ret;
default:
return vdso_fallback_gettime(clock, ts);
}
}
int clock_gettime(clockid_t clock, struct timespec *ts)
__attribute__((weak, alias("__vdso_clock_gettime")));

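The two-word syscall_return_value mirrors the tile syscall convention visible in the fallback asm above: the result comes back in r0 ("=R00") and the error code in r1 ("=R01"). A hypothetical caller-side wrapper (not libc's actual code, and assuming <errno.h>) would fold that back into the usual -1/errno convention:

/* Hypothetical: convert the {value, error} pair from __vdso_clock_gettime()
 * into the -1/errno convention that C callers expect. */
static int clock_gettime_user(clockid_t clock, struct timespec *ts)
{
	struct syscall_return_value rv = __vdso_clock_gettime(clock, ts);

	if (rv.error) {
		errno = rv.error;
		return -1;
	}
	return rv.value;
}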

@@ -66,11 +66,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
VMLINUX_SYMBOL(_sinitdata) = .;
INIT_DATA_SECTION(16) :data =0
PERCPU_SECTION(L2_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einitdata) = .;
__init_end = .;
_sdata = .; /* Start of data section */


@@ -254,8 +254,8 @@ static pgprot_t __init init_pgprot(ulong address)
* Everything else that isn't data or bss is heap, so mark it
* with the initial heap home (hash-for-home, or this cpu). This
* includes any addresses after the loaded image and any address before
* _einitdata, since we already captured the case of text before
* _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
* __init_end, since we already captured the case of text before
* _sinittext, and __pa(einittext) is approximately __pa(__init_begin).
*
* All the LOWMEM pages that we mark this way will get their
* struct page homecache properly marked later, in set_page_homes().
@@ -263,7 +263,7 @@ static pgprot_t __init init_pgprot(ulong address)
* homes, but with a zero free_time we don't have to actually
* do a flush action the first time we use them, either.
*/
if (address >= (ulong) _end || address < (ulong) _einitdata)
if (address >= (ulong) _end || address < (ulong) __init_end)
return construct_pgprot(PAGE_KERNEL, initial_heap_home());
/* Use hash-for-home if requested for data/bss. */
@@ -632,7 +632,7 @@ int devmem_is_allowed(unsigned long pagenr)
{
return pagenr < kaddr_to_pfn(_end) &&
!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
pagenr < kaddr_to_pfn(_einitdata)) &&
pagenr < kaddr_to_pfn(__init_end)) &&
!(pagenr >= kaddr_to_pfn(_sinittext) ||
pagenr <= kaddr_to_pfn(_einittext-1));
}
@@ -975,8 +975,8 @@ void free_initmem(void)
/* Free the data pages that we won't use again after init. */
free_init_pages("unused kernel data",
(unsigned long)_sinitdata,
(unsigned long)_einitdata);
(unsigned long)__init_begin,
(unsigned long)__init_end);
/*
* Free the pages mapped from 0xc0000000 that correspond to code


@@ -76,6 +76,7 @@ MODULE_LICENSE("GPL");
static int srom_devs; /* Number of SROM partitions */
static struct cdev srom_cdev;
static struct platform_device *srom_parent;
static struct class *srom_class;
static struct srom_dev *srom_devices;
@@ -350,7 +351,7 @@ static int srom_setup_minor(struct srom_dev *srom, int index)
SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
return -EIO;
dev = device_create(srom_class, &platform_bus,
dev = device_create(srom_class, &srom_parent->dev,
MKDEV(srom_major, index), srom, "%d", index);
return PTR_ERR_OR_ZERO(dev);
}
@@ -415,6 +416,13 @@ static int srom_init(void)
if (result < 0)
goto fail_chrdev;
/* Create a parent device */
srom_parent = platform_device_register_simple("srom", -1, NULL, 0);
if (IS_ERR(srom_parent)) {
result = PTR_ERR(srom_parent);
goto fail_pdev;
}
/* Create a sysfs class. */
srom_class = class_create(THIS_MODULE, "srom");
if (IS_ERR(srom_class)) {
@@ -438,6 +446,8 @@ fail_class:
device_destroy(srom_class, MKDEV(srom_major, i));
class_destroy(srom_class);
fail_cdev:
platform_device_unregister(srom_parent);
fail_pdev:
cdev_del(&srom_cdev);
fail_chrdev:
unregister_chrdev_region(dev, srom_devs);
@@ -454,6 +464,7 @@ static void srom_cleanup(void)
device_destroy(srom_class, MKDEV(srom_major, i));
class_destroy(srom_class);
cdev_del(&srom_cdev);
platform_device_unregister(srom_parent);
unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
kfree(srom_devices);
}
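
The pattern used here (register a minimal platform device purely so that device_create() has a real parent, rather than the bare &platform_bus reference this series removes) is generic. A condensed, hypothetical sketch with made-up names, assuming <linux/module.h>, <linux/platform_device.h>, <linux/device.h> and <linux/err.h>:

static struct platform_device *demo_parent;
static struct class *demo_class;

static int __init demo_init(void)
{
	demo_parent = platform_device_register_simple("demo", -1, NULL, 0);
	if (IS_ERR(demo_parent))
		return PTR_ERR(demo_parent);

	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class)) {
		platform_device_unregister(demo_parent);
		return PTR_ERR(demo_class);
	}

	/* device_create() children now hang off &demo_parent->dev. */
	return 0;
}

static void __exit demo_exit(void)
{
	class_destroy(demo_class);
	platform_device_unregister(demo_parent);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");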