clocksource: hyper-v: Adjust hv_read_tsc_page_tsc() to avoid special casing U64_MAX
Currently hv_read_tsc_page_tsc() (ab)uses the (valid) time value of U64_MAX as an error return. This breaks the clean wrap-around of the clock.

Modify the function signature to return a boolean state and provide another u64 pointer to store the actual time on success. This obviates the need to steal one time value and restores the full counter width.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.775630881@infradead.org
parent 77750f78b0
commit 9397fa2ea3
4 changed files with 29 additions and 28 deletions
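The core of the change is an error-reporting pattern swap: instead of stealing the (valid) time value U64_MAX as an in-band error sentinel, the function returns a boolean and writes the time through an output pointer. A minimal sketch of the new calling convention, modeled on the vDSO user in the first hunk below (illustrative only; example_read_time() is a hypothetical wrapper, not part of the commit):

/* Hypothetical caller of the reworked API, mirroring vread_hvclock(). */
static u64 example_read_time(void)
{
	u64 tsc, time;

	/*
	 * Success/failure now travels out-of-band in the bool return, so
	 * 'time' may legitimately take any 64-bit value, including U64_MAX,
	 * and the clock wraps around cleanly.
	 */
	if (hv_read_tsc_page_tsc(&hvclock_page, &tsc, &time))
		return time & S64_MAX;

	/* Error path: no time value had to be sacrificed to signal it. */
	return U64_MAX;
}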
arch/x86/include/asm/vdso/gettimeofday.h

@@ -238,10 +238,12 @@ static u64 vread_pvclock(void)
 #ifdef CONFIG_HYPERV_TIMER
 static u64 vread_hvclock(void)
 {
-	u64 ret = hv_read_tsc_page(&hvclock_page);
+	u64 tsc, time;
+
+	if (hv_read_tsc_page_tsc(&hvclock_page, &tsc, &time))
+		return time & S64_MAX;
 
-	if (likely(ret != U64_MAX))
-		ret &= S64_MAX;
-
-	return ret;
+	return U64_MAX;
 }
 #endif
arch/x86/kvm/x86.c

@@ -2799,14 +2799,13 @@ static u64 read_tsc(void)
 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
 			  int *mode)
 {
-	long v;
 	u64 tsc_pg_val;
+	long v;
 
 	switch (clock->vclock_mode) {
 	case VDSO_CLOCKMODE_HVCLOCK:
-		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
-						  tsc_timestamp);
-		if (tsc_pg_val != U64_MAX) {
+		if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
+					 tsc_timestamp, &tsc_pg_val)) {
 			/* TSC page valid */
 			*mode = VDSO_CLOCKMODE_HVCLOCK;
 			v = (tsc_pg_val - clock->cycle_last) &
drivers/clocksource/hyperv_timer.c

@@ -393,14 +393,20 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_clock_tsc(void)
+static notrace u64 read_hv_clock_tsc(void)
 {
-	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
+	u64 cur_tsc, time;
 
-	if (current_tick == U64_MAX)
-		current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+	/*
+	 * The Hyper-V Top-Level Function Spec (TLFS), section Timers,
+	 * subsection Reference Counter, guarantees that the TSC and MSR
+	 * times are in sync and monotonic. Therefore we can fall back
+	 * to the MSR in case the TSC page indicates unavailability.
+	 */
+	if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time))
+		time = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 
-	return current_tick;
+	return time;
 }
 
 static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
include/clocksource/hyperv_timer.h

@@ -38,8 +38,9 @@ extern void hv_remap_tsc_clocksource(void);
 extern unsigned long hv_get_tsc_pfn(void);
 extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
 
-static inline notrace u64
-hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+		     u64 *cur_tsc, u64 *time)
 {
 	u64 scale, offset;
 	u32 sequence;
@@ -63,7 +64,7 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
 	do {
 		sequence = READ_ONCE(tsc_pg->tsc_sequence);
 		if (!sequence)
-			return U64_MAX;
+			return false;
 		/*
 		 * Make sure we read sequence before we read other values from
 		 * TSC page.
@@ -82,15 +83,8 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
 
 	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
 
-	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
-}
-
-static inline notrace u64
-hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
-{
-	u64 cur_tsc;
-
-	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
+	*time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+
+	return true;
 }
 
 #else /* CONFIG_HYPERV_TIMER */
@@ -104,10 +98,10 @@ static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 	return NULL;
 }
 
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
-					u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time)
 {
-	return U64_MAX;
+	return false;
 }
 
 static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
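For reference, the hyperv_timer.h hunks above assemble into the following shape for the reworked reader (a sketch, not the verbatim upstream file: the middle of the retry loop falls between the displayed hunks, so the smp_rmb() barriers and the hv_get_raw_timer() read are assumptions based on the surrounding pre-existing code):

static __always_inline bool
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
		     u64 *cur_tsc, u64 *time)
{
	u64 scale, offset;
	u32 sequence;

	do {
		/* A zero sequence means the TSC page is unavailable. */
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return false;

		/* Read sequence before the other TSC page fields (assumed barrier). */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = hv_get_raw_timer();	/* assumed raw TSC read */

		/* Re-check the sequence after all fields are read (assumed barrier). */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	/* Scale and offset the raw TSC; the full 64-bit width is now valid. */
	*time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;

	return true;
}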