Commit 7b2a4306 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "The timer departement provides:

   - More y2038 work in the area of ntp and pps.

   - Optimization of posix cpu timers

   - New time related selftests

   - Some new clocksource drivers

   - The usual pile of fixes, cleanups and improvements"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  timeconst: Update path in comment
  timers/x86/hpet: Type adjustments
  clocksource/drivers/armada-370-xp: Implement ARM delay timer
  clocksource/drivers/tango_xtal: Add new timer for Tango SoCs
  clocksource/drivers/imx: Allow timer irq affinity change
  clocksource/drivers/exynos_mct: Use container_of() instead of this_cpu_ptr()
  clocksource/drivers/h8300_*: Remove unneeded memset()s
  clocksource/drivers/sh_cmt: Remove unneeded memset() in sh_cmt_setup()
  clocksource/drivers/em_sti: Remove unneeded memset()s
  clocksource/drivers/mediatek: Use GPT as sched clock source
  clockevents/drivers/mtk: Fix spurious interrupt leading to crash
  posix_cpu_timer: Reduce unnecessary sighand lock contention
  posix_cpu_timer: Convert cputimer->running to bool
  posix_cpu_timer: Check thread timers only when there are active thread timers
  posix_cpu_timer: Optimize fastpath_timer_check()
  timers, kselftest: Add 'adjtick' test to validate adjtimex() tick adjustments
  timers: Use __fls in apply_slack()
  clocksource: Remove return statement from void functions
  net: sfc: avoid using timespec
  ntp/pps: use y2038 safe types in pps_event_time
  ...
parents 316dde2f 03f136a2
@@ -63,10 +63,10 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
-extern int boot_hpet_disable;
+extern bool boot_hpet_disable;
 extern u8 hpet_blockid;
-extern int hpet_force_user;
-extern u8 hpet_msi_disable;
+extern bool hpet_force_user;
+extern bool hpet_msi_disable;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
...
@@ -584,7 +584,7 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
-        boot_hpet_disable = 1;
+        boot_hpet_disable = true;
         pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
 #endif
 }
...
@@ -37,10 +37,10 @@
  */
 unsigned long hpet_address;
 u8 hpet_blockid; /* OS timer block num */
-u8 hpet_msi_disable;
+bool hpet_msi_disable;

 #ifdef CONFIG_PCI_MSI
-static unsigned long hpet_num_timers;
+static unsigned int hpet_num_timers;
 #endif
 static void __iomem *hpet_virt_address;

@@ -86,9 +86,9 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-int boot_hpet_disable;
-int hpet_force_user;
-static int hpet_verbose;
+bool boot_hpet_disable;
+bool hpet_force_user;
+static bool hpet_verbose;

 static int __init hpet_setup(char *str)
 {

@@ -98,11 +98,11 @@ static int __init hpet_setup(char *str)
                 if (next)
                         *next++ = 0;
                 if (!strncmp("disable", str, 7))
-                        boot_hpet_disable = 1;
+                        boot_hpet_disable = true;
                 if (!strncmp("force", str, 5))
-                        hpet_force_user = 1;
+                        hpet_force_user = true;
                 if (!strncmp("verbose", str, 7))
-                        hpet_verbose = 1;
+                        hpet_verbose = true;
                 str = next;
         }
         return 1;

@@ -111,7 +111,7 @@ __setup("hpet=", hpet_setup);
 static int __init disable_hpet(char *str)
 {
-        boot_hpet_disable = 1;
+        boot_hpet_disable = true;
         return 1;
 }
 __setup("nohpet", disable_hpet);

@@ -124,7 +124,7 @@ static inline int is_hpet_capable(void)
 /*
  * HPET timer interrupt enable / disable
  */
-static int hpet_legacy_int_enabled;
+static bool hpet_legacy_int_enabled;

 /**
  * is_hpet_enabled - check whether the hpet timer interrupt is enabled

@@ -230,7 +230,7 @@ static struct clock_event_device hpet_clockevent;
 static void hpet_stop_counter(void)
 {
-        unsigned long cfg = hpet_readl(HPET_CFG);
+        u32 cfg = hpet_readl(HPET_CFG);

         cfg &= ~HPET_CFG_ENABLE;
         hpet_writel(cfg, HPET_CFG);
 }

@@ -272,7 +272,7 @@ static void hpet_enable_legacy_int(void)
         cfg |= HPET_CFG_LEGACY;
         hpet_writel(cfg, HPET_CFG);
-        hpet_legacy_int_enabled = 1;
+        hpet_legacy_int_enabled = true;
 }

 static void hpet_legacy_clockevent_register(void)

@@ -983,7 +983,7 @@ void hpet_disable(void)
                 cfg = *hpet_boot_cfg;
         else if (hpet_legacy_int_enabled) {
                 cfg &= ~HPET_CFG_LEGACY;
-                hpet_legacy_int_enabled = 0;
+                hpet_legacy_int_enabled = false;
         }
         cfg &= ~HPET_CFG_ENABLE;
         hpet_writel(cfg, HPET_CFG);

@@ -1121,8 +1121,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 static void hpet_disable_rtc_channel(void)
 {
-        unsigned long cfg;
-        cfg = hpet_readl(HPET_T1_CFG);
+        u32 cfg = hpet_readl(HPET_T1_CFG);
         cfg &= ~HPET_TN_ENABLE;
         hpet_writel(cfg, HPET_T1_CFG);
 }
...
@@ -524,7 +524,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
-        hpet_msi_disable = 1;
+        hpet_msi_disable = true;
 }

 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
...
@@ -279,6 +279,10 @@ config CLKSRC_MIPS_GIC
         depends on MIPS_GIC
         select CLKSRC_OF

+config CLKSRC_TANGO_XTAL
+        bool
+        select CLKSRC_OF
+
 config CLKSRC_PXA
         def_bool y if ARCH_PXA || ARCH_SA1100
         select CLKSRC_OF if OF
...
@@ -56,6 +56,7 @@ obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
 obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
 obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
 obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
+obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
 obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
 obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
 obj-$(CONFIG_H8300) += h8300_timer8.o
...
@@ -228,7 +228,6 @@ static int em_sti_register_clocksource(struct em_sti_priv *p)
 {
         struct clocksource *cs = &p->cs;

-        memset(cs, 0, sizeof(*cs));
         cs->name = dev_name(&p->pdev->dev);
         cs->rating = 200;
         cs->read = em_sti_clocksource_read;

@@ -285,7 +284,6 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 {
         struct clock_event_device *ced = &p->ced;

-        memset(ced, 0, sizeof(*ced));
         ced->name = dev_name(&p->pdev->dev);
         ced->features = CLOCK_EVT_FEAT_ONESHOT;
         ced->rating = 200;
...
@@ -382,24 +382,28 @@ static void exynos4_mct_tick_start(unsigned long cycles,
 static int exynos4_tick_set_next_event(unsigned long cycles,
                                        struct clock_event_device *evt)
 {
-        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+        struct mct_clock_event_device *mevt;
+
+        mevt = container_of(evt, struct mct_clock_event_device, evt);
         exynos4_mct_tick_start(cycles, mevt);
         return 0;
 }

 static int set_state_shutdown(struct clock_event_device *evt)
 {
-        exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick));
+        struct mct_clock_event_device *mevt;
+
+        mevt = container_of(evt, struct mct_clock_event_device, evt);
+        exynos4_mct_tick_stop(mevt);
         return 0;
 }

 static int set_state_periodic(struct clock_event_device *evt)
 {
-        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+        struct mct_clock_event_device *mevt;
         unsigned long cycles_per_jiffy;

+        mevt = container_of(evt, struct mct_clock_event_device, evt);
         cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
                             >> evt->shift);
         exynos4_mct_tick_stop(mevt);
...
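Note: a minimal userspace sketch (stand-in types, not the driver's own) of why the container_of() conversion above is safe: the callback receives a pointer to the embedded clock_event_device, so the enclosing wrapper can be recovered from that pointer no matter which CPU runs the callback, whereas this_cpu_ptr() silently assumed the callback ran on the owning CPU.

#include <stddef.h>
#include <stdio.h>

/* Userspace restatement of the kernel's container_of(), for illustration. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct clock_event_device { int rating; };      /* stand-in */

struct mct_clock_event_device {                 /* stand-in */
        const char *name;
        struct clock_event_device evt;
};

int main(void)
{
        struct mct_clock_event_device mevt = { .name = "mct_tick0" };
        struct clock_event_device *evt = &mevt.evt;  /* what the core passes */

        /* Recover the wrapper from the embedded member, on any CPU. */
        struct mct_clock_event_device *back =
                container_of(evt, struct mct_clock_event_device, evt);

        printf("%s\n", back->name);     /* prints "mct_tick0" */
        return 0;
}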
@@ -153,7 +153,6 @@ static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev)
         int ret, irq;
         unsigned int ch;

-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;

         res[REG_CH] = platform_get_resource(p->pdev,
...
@@ -215,7 +215,6 @@ static int timer8_setup(struct timer8_priv *p,
         int irq;
         int ret;

-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;

         res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
...
@@ -123,7 +123,6 @@ static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev)
 {
         struct resource *res[2];

-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;

         res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
...
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/sched_clock.h>
 #include <linux/slab.h>

 #define GPT_IRQ_EN_REG          0x00

@@ -59,6 +60,13 @@ struct mtk_clock_event_device {
         struct clock_event_device dev;
 };

+static void __iomem *gpt_sched_reg __read_mostly;
+
+static u64 notrace mtk_read_sched_clock(void)
+{
+        return readl_relaxed(gpt_sched_reg);
+}
+
 static inline struct mtk_clock_event_device *to_mtk_clk(
                                 struct clock_event_device *c)
 {

@@ -141,14 +149,6 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }

-static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
-{
-        /* Disable all interrupts */
-        writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-        /* Acknowledge all interrupts */
-        writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-}
-
 static void
 mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
 {

@@ -168,6 +168,12 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
 {
         u32 val;

+        /* Disable all interrupts */
+        writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+
+        /* Acknowledge all spurious pending interrupts */
+        writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+
         val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
         writel(val | GPT_IRQ_ENABLE(timer),
                         evt->gpt_base + GPT_IRQ_EN_REG);

@@ -220,8 +226,6 @@ static void __init mtk_timer_init(struct device_node *node)
         }
         rate = clk_get_rate(clk);

-        mtk_timer_global_reset(evt);
-
         if (request_irq(evt->dev.irq, mtk_timer_interrupt,
                         IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
                 pr_warn("failed to setup irq %d\n", evt->dev.irq);

@@ -234,6 +238,8 @@ static void __init mtk_timer_init(struct device_node *node)
         mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
         clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
                         node->name, rate, 300, 32, clocksource_mmio_readl_up);
+        gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
+        sched_clock_register(mtk_read_sched_clock, 32, rate);

         /* Configure clock event */
         mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
...
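Note: sched_clock_register(mtk_read_sched_clock, 32, rate) hands the core a raw 32-bit counter read; the core extends the value past wraparound and scales cycles to nanoseconds with a precomputed mult/shift pair. A minimal sketch of that scaling (the 13 MHz rate below is an illustrative value, not taken from this driver):

/* Same form as the kernel's cyc_to_ns(): ns = (cycles * mult) >> shift.
 * For an example 13 MHz counter, one cycle is ~76.9 ns, so mult and
 * shift are chosen such that mult / 2^shift ~= 1e9 / 13e6. */
static unsigned long long cyc_to_ns_sketch(unsigned long long cycles,
                                           unsigned int mult,
                                           unsigned int shift)
{
        return (cycles * mult) >> shift;
}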
@@ -962,7 +962,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
         unsigned int i;
         int ret;

-        memset(cmt, 0, sizeof(*cmt));
         cmt->pdev = pdev;
         raw_spin_lock_init(&cmt->lock);
...
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/of_address.h>
#include <linux/printk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/clk.h>

static void __iomem *xtal_in_cnt;
static struct delay_timer delay_timer;

static unsigned long notrace read_xtal_counter(void)
{
        return readl_relaxed(xtal_in_cnt);
}

static u64 notrace read_sched_clock(void)
{
        return read_xtal_counter();
}

static cycle_t read_clocksource(struct clocksource *cs)
{
        return read_xtal_counter();
}

static struct clocksource tango_xtal = {
        .name   = "tango-xtal",
        .rating = 350,
        .read   = read_clocksource,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init tango_clocksource_init(struct device_node *np)
{
        struct clk *clk;
        int xtal_freq, ret;

        xtal_in_cnt = of_iomap(np, 0);
        if (xtal_in_cnt == NULL) {
                pr_err("%s: invalid address\n", np->full_name);
                return;
        }

        clk = of_clk_get(np, 0);
        if (IS_ERR(clk)) {
                pr_err("%s: invalid clock\n", np->full_name);
                return;
        }

        xtal_freq = clk_get_rate(clk);
        delay_timer.freq = xtal_freq;
        delay_timer.read_current_timer = read_xtal_counter;

        ret = clocksource_register_hz(&tango_xtal, xtal_freq);
        if (ret != 0) {
                pr_err("%s: registration failed\n", np->full_name);
                return;
        }

        sched_clock_register(read_sched_clock, 32, xtal_freq);
        register_current_timer_delay(&delay_timer);
}

CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
@@ -45,6 +45,8 @@
 #include <linux/percpu.h>
 #include <linux/syscore_ops.h>

+#include <asm/delay.h>
+
 /*
  * Timer block registers.
  */

@@ -249,6 +251,15 @@ struct syscore_ops armada_370_xp_timer_syscore_ops = {
         .resume = armada_370_xp_timer_resume,
 };

+static unsigned long armada_370_delay_timer_read(void)
+{
+        return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer armada_370_delay_timer = {
+        .read_current_timer = armada_370_delay_timer_read,
+};
+
 static void __init armada_370_xp_timer_common_init(struct device_node *np)
 {
         u32 clr = 0, set = 0;

@@ -287,6 +298,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
                                         TIMER0_RELOAD_EN | enable_mask,
                                         TIMER0_RELOAD_EN | enable_mask);

+        armada_370_delay_timer.freq = timer_clk;
+        register_current_timer_delay(&armada_370_delay_timer);
+
         /*
          * Set scale and timer for sched_clock.
          */
...
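Note: the read hook returns the bitwise NOT of TIMER0's value because the hardware counts down while the delay-timer core expects an up-counter. Once registered, ARM's udelay() becomes a counter-based wait instead of a calibrated busy loop, so it stays correct across cpufreq changes. A rough sketch of the resulting loop (simplified, not the kernel's exact code):

/* Spin until the counter has advanced by the requested number of
 * cycles; unsigned subtraction keeps (now - start) correct across
 * counter wraparound. */
static void timer_udelay_sketch(unsigned long usecs, unsigned long freq,
                                unsigned long (*read_counter)(void))
{
        unsigned long cycles =
                (unsigned long)(((unsigned long long)usecs * freq) / 1000000);
        unsigned long start = read_counter();

        while ((read_counter() - start) < cycles)
                ; /* busy-wait */
}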
@@ -305,13 +305,14 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
         struct irqaction *act = &imxtm->act;

         ced->name = "mxc_timer1";
-        ced->features = CLOCK_EVT_FEAT_ONESHOT;
+        ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
         ced->set_state_shutdown = mxc_shutdown;
         ced->set_state_oneshot = mxc_set_oneshot;
         ced->tick_resume = mxc_shutdown;
         ced->set_next_event = imxtm->gpt->set_next_event;
         ced->rating = 200;
         ced->cpumask = cpumask_of(0);
+        ced->irq = imxtm->irq;
         clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
                                         0xff, 0xfffffffe);
...
@@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
 /* For Siena platforms NIC time is s and ns */
 static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-        struct timespec ts = ns_to_timespec(ns);
-        *nic_major = ts.tv_sec;
+        struct timespec64 ts = ns_to_timespec64(ns);
+        *nic_major = (u32)ts.tv_sec;
         *nic_minor = ts.tv_nsec;
 }

@@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
  */
 static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-        struct timespec ts = ns_to_timespec(ns);
-        u32 maj = ts.tv_sec;
+        struct timespec64 ts = ns_to_timespec64(ns);
+        u32 maj = (u32)ts.tv_sec;
         u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
                          (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);

@@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx,
                                struct pps_event_time *last_time)
 {
         struct pps_event_time now;
-        struct timespec limit;
+        struct timespec64 limit;
         struct efx_ptp_data *ptp = efx->ptp_data;
-        struct timespec start;
+        struct timespec64 start;
         int *mc_running = ptp->start.addr;

         pps_get_ts(&now);
         start = now.ts_real;
         limit = now.ts_real;
-        timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+        timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);

         /* Write host time for specified period or until MC is done */
-        while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+        while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
                ACCESS_ONCE(*mc_running)) {
-                struct timespec update_time;
+                struct timespec64 update_time;
                 unsigned int host_time;

                 /* Don't update continuously to avoid saturating the PCIe bus */
                 update_time = now.ts_real;
-                timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+                timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
                 do {
                         pps_get_ts(&now);
-                } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+                } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
                          ACCESS_ONCE(*mc_running));

                 /* Synchronise NIC with single word of time only */

@@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
         struct efx_ptp_data *ptp = efx->ptp_data;
         u32 last_sec;
         u32 start_sec;
-        struct timespec delta;
+        struct timespec64 delta;
         ktime_t mc_time;

         if (number_readings == 0)

@@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
          */
         for (i = 0; i < number_readings; i++) {
                 s32 window, corrected;
-                struct timespec wait;
+                struct timespec64 wait;

                 efx_ptp_read_timeset(
                         MCDI_ARRAY_STRUCT_PTR(synch_buf,
                                               PTP_OUT_SYNCHRONIZE_TIMESET, i),
                         &ptp->timeset[i]);

-                wait = ktime_to_timespec(
+                wait = ktime_to_timespec64(
                         ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
                 window = ptp->timeset[i].window;
                 corrected = window - wait.tv_nsec;

@@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
                         ptp->timeset[last_good].minor, 0);

         /* Calculate delay from NIC top of second to last_time */
-        delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+        delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;

         /* Set PPS timestamp to match NIC top of second */
         ptp->host_time_pps = *last_time;
...
@@ -179,8 +179,8 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
         /* check event type */
         BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);

-        dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
-                        ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+        dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
+                        (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);

         timespec_to_pps_ktime(&ts_real, ts->ts_real);
...
@@ -59,7 +59,8 @@ extern struct fs_struct init_fs;
         .rlim           = INIT_RLIMITS,                         \
         .cputimer       = {                                     \
                 .cputime_atomic = INIT_CPUTIME_ATOMIC,          \
-                .running        = 0,                            \
+                .running        = false,                        \
+                .checking_timer = false,                        \
         },                                                      \
         INIT_PREV_CPUTIME(sig)                                  \
         .cred_guard_mutex =                                     \
...
@@ -48,9 +48,9 @@ struct pps_source_info {
 struct pps_event_time {
 #ifdef CONFIG_NTP_PPS
-        struct timespec ts_raw;
+        struct timespec64 ts_raw;
 #endif /* CONFIG_NTP_PPS */
-        struct timespec ts_real;
+        struct timespec64 ts_real;
 };

 /* The main struct */

@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps,
 struct pps_device *pps_lookup_dev(void const *cookie);

 static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
-                                         struct timespec ts)
+                                         struct timespec64 ts)
 {
         kt->sec = ts.tv_sec;
         kt->nsec = ts.tv_nsec;

@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-        getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+        ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
 }

 #else /* CONFIG_NTP_PPS */

 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-        getnstimeofday(&ts->ts_real);
+        ktime_get_real_ts64(&ts->ts_real);
 }

 #endif /* CONFIG_NTP_PPS */

 /* Subtract known time delay from PPS event time(s) */
-static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
 {
-        ts->ts_real = timespec_sub(ts->ts_real, delta);
+        ts->ts_real = timespec64_sub(ts->ts_real, delta);
 #ifdef CONFIG_NTP_PPS
-        ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+        ts->ts_raw = timespec64_sub(ts->ts_raw, delta);
 #endif
 }
...
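Note: these conversions are part of the y2038 work named in the merge message. struct timespec carries a long tv_sec, which is 32 bits on 32-bit machines and overflows in January 2038, while timespec64 has a 64-bit seconds field everywhere. A standalone illustration of the boundary (plain userspace C):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        /* The last second a signed 32-bit counter can represent. */
        time_t last = (time_t)INT32_MAX;        /* 2147483647 */

        /* Prints "Tue Jan 19 03:14:07 2038" (UTC); one tick later a
         * 32-bit tv_sec wraps negative, back to December 1901. */
        printf("%s", asctime(gmtime(&last)));
        return 0;
}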
@@ -617,15 +617,18 @@ struct task_cputime_atomic {
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:     atomic thread group interval timers.
- * @running:            non-zero when there are timers running and
- *                      @cputime receives updates.
+ * @running:            true when there are timers running and
+ *                      @cputime_atomic receives updates.
+ * @checking_timer:     true when a thread in the group is in the
+ *                      process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
         struct task_cputime_atomic cputime_atomic;
-        int running;
+        bool running;
+        bool checking_timer;
 };

 #include <linux/rwsem.h>
...
@@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 /*
  * PPS accessor
  */
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
-                                   struct timespec *ts_real);
+extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
+                                        struct timespec64 *ts_real);

 /*
  * Persistent clock related interfaces
...
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)

 extern int do_adjtimex(struct timex *);
-extern void hardpps(const struct timespec *, const struct timespec *);
+extern void hardpps(const struct timespec64 *, const struct timespec64 *);

 int read_current_timer(unsigned long *timer_val);
 void ntp_notify_cmos_timer(void);
...
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
         if (cpu_limit != RLIM_INFINITY) {
                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
-                sig->cputimer.running = 1;
+                sig->cputimer.running = true;
         }

         /* The timer lists. */
...
@@ -479,7 +479,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * return half the number of nanoseconds the hardware counter can technically
  * cover. This is done so that we can potentially detect problems caused by
  * delayed timers or bad hardware, which might result in time intervals that
- * are larger then what the math used can handle without overflows.
+ * are larger than what the math used can handle without overflows.
  */
 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 {

@@ -595,16 +595,15 @@ static void __clocksource_select(bool skipcur)
  */
 static void clocksource_select(void)
 {
-        return __clocksource_select(false);
+        __clocksource_select(false);
 }

 static void clocksource_select_fallback(void)
 {
-        return __clocksource_select(true);
+        __clocksource_select(true);
 }

 #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
 static inline void clocksource_select(void) { }
 static inline void clocksource_select_fallback(void) { }
...
@@ -59,7 +59,7 @@
 /*
  * The timer bases:
  *
- * There are more clockids then hrtimer bases. Thus, we index
+ * There are more clockids than hrtimer bases. Thus, we index
  * into the timer bases by the hrtimer_base_type enum. When trying
  * to reach a base using a clockid, hrtimer_clockid_to_base()
  * is used to convert from clockid to the proper hrtimer_base_type.
...
@@ -99,7 +99,7 @@ static time64_t ntp_next_leap_sec = TIME64_MAX;
 static int pps_valid;           /* signal watchdog counter */
 static long pps_tf[3];          /* phase median filter */
 static long pps_jitter;         /* current jitter (ns) */
-static struct timespec pps_fbase; /* beginning of the last freq interval */
+static struct timespec64 pps_fbase; /* beginning of the last freq interval */
 static int pps_shift;           /* current interval duration (s) (shift) */
 static int pps_intcnt;          /* interval counter */
 static s64 pps_freq;            /* frequency offset (scaled ns/s) */

@@ -509,7 +509,7 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 static void sync_cmos_clock(struct work_struct *work)
 {
         struct timespec64 now;
-        struct timespec next;
+        struct timespec64 next;
         int fail = 1;

         /*

@@ -559,7 +559,7 @@ static void sync_cmos_clock(struct work_struct *work)
                 next.tv_nsec -= NSEC_PER_SEC;
         }
         queue_delayed_work(system_power_efficient_wq,
-                           &sync_cmos_work, timespec_to_jiffies(&next));
+                           &sync_cmos_work, timespec64_to_jiffies(&next));
 }

 void ntp_notify_cmos_timer(void)

@@ -773,13 +773,13 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
  * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
  * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
 struct pps_normtime {
-        __kernel_time_t sec;    /* seconds */
+        s64             sec;    /* seconds */
         long            nsec;   /* nanoseconds */
 };

 /* normalize the timestamp so that nsec is in the
    ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
-static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
 {
         struct pps_normtime norm = {
                 .sec = ts.tv_sec,

@@ -861,7 +861,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
                 pps_errcnt++;
                 pps_dec_freq_interval();
                 printk_deferred(KERN_ERR
-                        "hardpps: PPSERROR: interval too long - %ld s\n",
+                        "hardpps: PPSERROR: interval too long - %lld s\n",
                         freq_norm.sec);
                 return 0;
         }

@@ -948,7 +948,7 @@ static void hardpps_update_phase(long error)
  * This code is based on David Mills's reference nanokernel
  * implementation. It was mostly rewritten but keeps the same idea.
  */
-void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
         struct pps_normtime pts_norm, freq_norm;

@@ -969,7 +969,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
         }

         /* ok, now we have a base for frequency calculation */
-        freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+        freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));

         /* check that the signal is in the range
          * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
...
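Note: pps_normalize_ts() shifts tv_nsec from [0, NSEC_PER_SEC) into (-NSEC_PER_SEC/2, NSEC_PER_SEC/2], carrying into the seconds, so a PPS edge just before a second boundary measures as a small negative phase error rather than almost a full second. A worked example mirroring the kernel's logic (stand-alone sketch):

/* A timestamp 0.9 s into second 5 normalizes to -0.1 s relative to
 * second 6, matching the kernel's pps_normalize_ts(). */
struct pps_normtime_sketch { long long sec; long nsec; };

static struct pps_normtime_sketch normalize(long long sec, long nsec)
{
        struct pps_normtime_sketch norm = { .sec = sec, .nsec = nsec };

        if (norm.nsec > 1000000000L / 2) {      /* NSEC_PER_SEC / 2 */
                norm.nsec -= 1000000000L;
                norm.sec++;
        }
        return norm;
}

/* normalize(5, 900000000) => { .sec = 6, .nsec = -100000000 } */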
@@ -9,5 +9,5 @@ extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec *, const struct timespec *);
+extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
 #endif /* _LINUX_NTP_INTERNAL_H */
@@ -249,7 +249,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
                  * but barriers are not required because update_gt_cputime()
                  * can handle concurrent updates.
                  */
-                WRITE_ONCE(cputimer->running, 1);
+                WRITE_ONCE(cputimer->running, true);
         }
         sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }

@@ -864,6 +864,13 @@ static void check_thread_timers(struct task_struct *tsk,
         unsigned long long expires;
         unsigned long soft;

+        /*
+         * If cputime_expires is zero, then there are no active
+         * per thread CPU timers.
+         */
+        if (task_cputime_zero(&tsk->cputime_expires))
+                return;
+
         expires = check_timers_list(timers, firing, prof_ticks(tsk));
         tsk_expires->prof_exp = expires_to_cputime(expires);

@@ -911,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
         struct thread_group_cputimer *cputimer = &sig->cputimer;

         /* Turn off cputimer->running. This is done without locking. */
-        WRITE_ONCE(cputimer->running, 0);
+        WRITE_ONCE(cputimer->running, false);
 }

 static u32 onecputick;

@@ -961,6 +968,19 @@ static void check_process_timers(struct task_struct *tsk,
         struct task_cputime cputime;
         unsigned long soft;

+        /*
+         * If cputimer is not running, then there are no active
+         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
+         */
+        if (!READ_ONCE(tsk->signal->cputimer.running))
+                return;
+
+        /*
+         * Signify that a thread is checking for process timers.
+         * Write access to this field is protected by the sighand lock.
+         */
+        sig->cputimer.checking_timer = true;
+
         /*
          * Collect the current process totals.
          */

@@ -1015,6 +1035,8 @@ static void check_process_timers(struct task_struct *tsk,
                 sig->cputime_expires.sched_exp = sched_expires;
         if (task_cputime_zero(&sig->cputime_expires))
                 stop_process_timers(sig);
+
+        sig->cputimer.checking_timer = false;
 }

@@ -1117,24 +1139,33 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
         struct signal_struct *sig;
-        cputime_t utime, stime;
-
-        task_cputime(tsk, &utime, &stime);

         if (!task_cputime_zero(&tsk->cputime_expires)) {
-                struct task_cputime task_sample = {
-                        .utime = utime,
-                        .stime = stime,
-                        .sum_exec_runtime = tsk->se.sum_exec_runtime
-                };
+                struct task_cputime task_sample;
+
+                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;

                 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                         return 1;
         }

         sig = tsk->signal;
-        /* Check if cputimer is running. This is accessed without locking. */
-        if (READ_ONCE(sig->cputimer.running)) {
+        /*
+         * Check if thread group timers expired when the cputimer is
+         * running and no other thread in the group is already checking
+         * for thread group cputimers. These fields are read without the
+         * sighand lock. However, this is fine because this is meant to
+         * be a fastpath heuristic to determine whether we should try to
+         * acquire the sighand lock to check/handle timers.
+         *
+         * In the worst case scenario, if 'running' or 'checking_timer' gets
+         * set but the current thread doesn't see the change yet, we'll wait
+         * until the next thread in the group gets a scheduler interrupt to
+         * handle the timer. This isn't an issue in practice because these
+         * types of delays with signals actually getting sent are expected.
+         */
+        if (READ_ONCE(sig->cputimer.running) &&
+            !READ_ONCE(sig->cputimer.checking_timer)) {
                 struct task_cputime group_sample;

                 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

@@ -1174,12 +1205,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * put them on the firing list.
          */
         check_thread_timers(tsk, &firing);
-        /*
-         * If there are any active process wide timers (POSIX 1.b, itimers,
-         * RLIMIT_CPU) cputimer must be running.
-         */
-        if (READ_ONCE(tsk->signal->cputimer.running))
-                check_process_timers(tsk, &firing);
+
+        check_process_timers(tsk, &firing);

         /*
          * We must release these locks before taking any timer's lock.
...
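Note: the running/checking_timer pair turns the per-tick fastpath into a lock-free gate: sighand is only taken when group timers are armed and nobody else is already scanning them. The shape of the pattern, reduced to its essentials (a sketch, not kernel code):

/* Both flags are written under the sighand lock, but the fastpath
 * reads them locklessly; a stale read only delays handling to a
 * later tick, it never corrupts state. */
#include <stdbool.h>

struct group_state_sketch {
        bool running;           /* any group-wide timers armed? */
        bool checking_timer;    /* someone already scanning them? */
};

static bool should_take_sighand(const struct group_state_sketch *s)
{
        /* Mirrors: READ_ONCE(running) && !READ_ONCE(checking_timer) */
        return s->running && !s->checking_timer;
}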
@@ -39,7 +39,7 @@ define fmuls(b,n,d) {
 }

 define timeconst(hz) {
-        print "/* Automatically generated by kernel/timeconst.bc */\n"
+        print "/* Automatically generated by kernel/time/timeconst.bc */\n"
         print "/* Time conversion constants for HZ == ", hz, " */\n"
         print "\n"
...
@@ -849,7 +849,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
 #ifdef CONFIG_NTP_PPS

 /**
- * getnstime_raw_and_real - get day and raw monotonic time in timespec format
+ * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format
  * @ts_raw:     pointer to the timespec to be set to raw monotonic time
  * @ts_real:    pointer to the timespec to be set to the time of day
  *

@@ -857,7 +857,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
  * same time atomically and stores the resulting timestamps in timespec
  * format.
  */
-void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
+void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
         unsigned long seq;

@@ -868,7 +868,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
         do {
                 seq = read_seqcount_begin(&tk_core.seq);

-                *ts_raw = timespec64_to_timespec(tk->raw_time);
+                *ts_raw = tk->raw_time;
                 ts_real->tv_sec = tk->xtime_sec;
                 ts_real->tv_nsec = 0;

@@ -877,10 +877,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
         } while (read_seqcount_retry(&tk_core.seq, seq));

-        timespec_add_ns(ts_raw, nsecs_raw);
-        timespec_add_ns(ts_real, nsecs_real);
+        timespec64_add_ns(ts_raw, nsecs_raw);
+        timespec64_add_ns(ts_real, nsecs_real);
 }
-EXPORT_SYMBOL(getnstime_raw_and_real);
+EXPORT_SYMBOL(ktime_get_raw_and_real_ts64);

 #endif /* CONFIG_NTP_PPS */

@@ -1674,7 +1674,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 /**
  * accumulate_nsecs_to_secs - Accumulates nsecs into secs
  *
- * Helper function that accumulates a the nsecs greater then a second
+ * Helper function that accumulates the nsecs greater than a second
  * from the xtime_nsec field to the xtime_secs field.
  * It also calls into the NTP code to handle leapsecond processing.
  *

@@ -1726,7 +1726,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
         cycle_t interval = tk->cycle_interval << shift;
         u64 raw_nsecs;

-        /* If the offset is smaller then a shifted interval, do nothing */
+        /* If the offset is smaller than a shifted interval, do nothing */
         if (offset < interval)
                 return offset;

@@ -2025,7 +2025,7 @@ int do_adjtimex(struct timex *txc)
 /**
  * hardpps() - Accessor function to NTP __hardpps function
  */
-void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
         unsigned long flags;
...
@@ -461,10 +461,17 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 static void timer_stats_account_timer(struct timer_list *timer)
 {
-        if (likely(!timer->start_site))
+        void *site;
+
+        /*
+         * start_site can be concurrently reset by
+         * timer_stats_timer_clear_start_info()
+         */
+        site = READ_ONCE(timer->start_site);
+        if (likely(!site))
                 return;

-        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+        timer_stats_update_stats(timer, timer->start_pid, site,
                                  timer->function, timer->start_comm,
                                  timer->flags);
 }

@@ -867,7 +874,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
         if (mask == 0)
                 return expires;

-        bit = find_last_bit(&mask, BITS_PER_LONG);
+        bit = __fls(mask);

         mask = (1UL << bit) - 1;
...
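Note: for a nonzero word, __fls(mask) and find_last_bit(&mask, BITS_PER_LONG) both return the index of the highest set bit, but __fls compiles to a single instruction on most architectures instead of a function call; the mask == 0 early return above keeps the undefined __fls(0) case out of reach. A quick equivalence check with userspace stand-ins:

#include <assert.h>
#include <limits.h>

/* Userspace stand-in for the kernel's __fls(): index of highest set bit.
 * Only valid for word != 0, same precondition as the kernel helper. */
static unsigned long fls_sketch(unsigned long word)
{
        return (unsigned long)(sizeof(long) * CHAR_BIT - 1) -
               (unsigned long)__builtin_clzl(word);
}

int main(void)
{
        assert(fls_sketch(1UL) == 0);
        assert(fls_sketch(0x90UL) == 7);        /* highest bit of 0b10010000 */
        return 0;
}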
@@ -8,7 +8,7 @@ LDFLAGS += -lrt -lpthread
 TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
              inconsistency-check raw_skew threadtest rtctest
-TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \
+TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
              skew_consistency clocksource-switch leap-a-day \
              leapcrash set-tai set-2038

@@ -24,6 +24,7 @@ include ../lib.mk
 run_destructive_tests: run_tests
         ./alarmtimer-suspend
         ./valid-adjtimex
+        ./adjtick
         ./change_skew
         ./skew_consistency
         ./clocksource-switch
...
/* adjtimex() tick adjustment test
* by: John Stultz <john.stultz@linaro.org>
* (C) Copyright Linaro Limited 2015
* Licensed under the GPLv2
*
* To build:
* $ gcc adjtick.c -o adjtick -lrt
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>

#ifdef KTEST
#include "../kselftest.h"
#else
static inline int ksft_exit_pass(void)
{
        exit(0);
}
static inline int ksft_exit_fail(void)
{
        exit(1);
}
#endif

#define CLOCK_MONOTONIC_RAW 4

#define NSEC_PER_SEC 1000000000LL
#define USEC_PER_SEC 1000000

#define MILLION 1000000

long systick;

long long llabs(long long val)
{
        if (val < 0)
                val = -val;
        return val;
}

unsigned long long ts_to_nsec(struct timespec ts)
{
        return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

struct timespec nsec_to_ts(long long ns)
{
        struct timespec ts;

        ts.tv_sec = ns/NSEC_PER_SEC;
        ts.tv_nsec = ns%NSEC_PER_SEC;
        return ts;
}

long long diff_timespec(struct timespec start, struct timespec end)
{
        long long start_ns, end_ns;

        start_ns = ts_to_nsec(start);
        end_ns = ts_to_nsec(end);
        return end_ns - start_ns;
}

void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
{
        struct timespec start, mid, end;
        long long diff = 0, tmp;
        int i;

        clock_gettime(CLOCK_MONOTONIC, mon);
        clock_gettime(CLOCK_MONOTONIC_RAW, raw);

        /* Try to get a more tightly bound pairing */
        for (i = 0; i < 3; i++) {
                long long newdiff;

                clock_gettime(CLOCK_MONOTONIC, &start);
                clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
                clock_gettime(CLOCK_MONOTONIC, &end);

                newdiff = diff_timespec(start, end);
                if (diff == 0 || newdiff < diff) {
                        diff = newdiff;
                        *raw = mid;
                        tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
                        *mon = nsec_to_ts(tmp);
                }
        }
}

long long get_ppm_drift(void)
{
        struct timespec mon_start, raw_start, mon_end, raw_end;
        long long delta1, delta2, eppm;

        get_monotonic_and_raw(&mon_start, &raw_start);
        sleep(15);
        get_monotonic_and_raw(&mon_end, &raw_end);

        delta1 = diff_timespec(mon_start, mon_end);
        delta2 = diff_timespec(raw_start, raw_end);
        eppm = (delta1*MILLION)/delta2 - MILLION;

        return eppm;
}

int check_tick_adj(long tickval)
{
        long long eppm, ppm;
        struct timex tx1;

        tx1.modes = ADJ_TICK;
        tx1.modes |= ADJ_OFFSET;
        tx1.modes |= ADJ_FREQUENCY;
        tx1.modes |= ADJ_STATUS;

        tx1.status = STA_PLL;
        tx1.offset = 0;
        tx1.freq = 0;
        tx1.tick = tickval;

        adjtimex(&tx1);
        sleep(1);

        ppm = ((long long)tickval * MILLION)/systick - MILLION;
        printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm);

        eppm = get_ppm_drift();
        printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);

        tx1.modes = 0;
        adjtimex(&tx1);

        if (tx1.offset || tx1.freq || tx1.tick != tickval) {
                printf(" [ERROR]\n");
                printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n");
                return -1;
        }

        /*
         * Here we use 100ppm difference as an error bound.
         * We likely should see better, but some coarse clocksources
         * cannot match the HZ tick size accurately, so we have a
         * internal correction factor that doesn't scale exactly
         * with the adjustment, resulting in > 10ppm error during
         * a 10% adjustment. 100ppm also gives us more breathing
         * room for interruptions during the measurement.
         */
        if (llabs(eppm - ppm) > 100) {
                printf(" [FAILED]\n");
                return -1;
        }
        printf(" [OK]\n");
        return 0;
}

int main(int argv, char **argc)
{
        struct timespec raw;
        long tick, max, interval, err;
        struct timex tx1;

        err = 0;
        setbuf(stdout, NULL);

        if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
                printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
                return -1;
        }

        printf("Each iteration takes about 15 seconds\n");

        systick = sysconf(_SC_CLK_TCK);
        systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK);

        max = systick/10; /* +/- 10% */
        interval = max/4; /* in 4 steps each side */

        for (tick = (systick - max); tick < (systick + max); tick += interval) {
                if (check_tick_adj(tick)) {
                        err = 1;
                        break;
                }
        }

        /* Reset things to zero */
        tx1.modes = ADJ_TICK;
        tx1.modes |= ADJ_OFFSET;
        tx1.modes |= ADJ_FREQUENCY;

        tx1.offset = 0;
        tx1.freq = 0;
        tx1.tick = systick;

        adjtimex(&tx1);

        if (err)
                return ksft_exit_fail();

        return ksft_exit_pass();
}
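Note: the pass criterion compares two ppm values computed above: the requested skew, (tick * 1e6)/systick - 1e6, against the skew actually measured between CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW. A worked example, assuming HZ=100 so systick = 10000 usec (illustrative numbers only):

/* Requested skew for tick = 9500:
 *   ppm  = (9500 * 1000000) / 10000 - 1000000 = -50000  (clock 5% slow)
 * Measured over ~15 s, if CLOCK_MONOTONIC advanced 14.25e9 ns while
 * CLOCK_MONOTONIC_RAW advanced 15.00e9 ns:
 *   eppm = (14250000000 * 1000000) / 15000000000 - 1000000 = -50000
 * |eppm - ppm| = 0 <= 100, so the iteration prints [OK].
 */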