Merge drm/drm-next into drm-misc-next

Start the 6.5 release cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>

commit ff32fcca64
Author: Maxime Ripard
Date:   2023-05-09 15:03:40 +02:00
 12099 files changed, 629240 insertions(+), 379856 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig

@@ -25,6 +25,7 @@ config IA64
 	select PCI_DOMAINS if PCI
 	select PCI_MSI
 	select PCI_SYSCALL if PCI
+	select HAS_IOPORT
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_EXIT_THREAD
@@ -202,10 +203,9 @@ config IA64_CYCLONE
 	  If you're unsure, answer N.
 
 config ARCH_FORCE_MAX_ORDER
-	int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
-	range 11 17 if !HUGETLB_PAGE
-	default "17" if HUGETLB_PAGE
-	default "11"
+	int
+	default "16" if HUGETLB_PAGE
+	default "10"
 
 config SMP
 	bool "Symmetric multi-processing support"

diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h

@@ -5,7 +5,7 @@
 #include <uapi/asm/cmpxchg.h>
 
 #define arch_xchg(ptr, x)	\
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 
 #define arch_cmpxchg(ptr, o, n)		cmpxchg_acq((ptr), (o), (n))
 #define arch_cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
--- a/arch/ia64/include/asm/sparsemem.h
+++ b/arch/ia64/include/asm/sparsemem.h

@@ -12,9 +12,9 @@
 #define SECTION_SIZE_BITS	(30)
 #define MAX_PHYSMEM_BITS	(50)
 #ifdef CONFIG_ARCH_FORCE_MAX_ORDER
-#if ((CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#if (CONFIG_ARCH_FORCE_MAX_ORDER + PAGE_SHIFT > SECTION_SIZE_BITS)
 #undef SECTION_SIZE_BITS
-#define SECTION_SIZE_BITS (CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT)
+#define SECTION_SIZE_BITS (CONFIG_ARCH_FORCE_MAX_ORDER + PAGE_SHIFT)
 #endif
 #endif
 

diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h

@@ -23,7 +23,7 @@
  */
 extern void ia64_xchg_called_with_bad_pointer(void);
 
-#define __xchg(x, ptr, size)						\
+#define __arch_xchg(x, ptr, size)					\
 ({									\
 	unsigned long __xchg_result;					\
 									\
@@ -51,7 +51,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 
 #ifndef __KERNEL__
 #define xchg(ptr, x)							\
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 #endif
 
 /*

diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c

@@ -234,15 +234,6 @@ static struct ctl_table kdump_ctl_table[] = {
 	},
 	{ }
 };
-
-static struct ctl_table sys_table[] = {
-	{
-	  .procname = "kernel",
-	  .mode = 0555,
-	  .child = kdump_ctl_table,
-	},
-	{ }
-};
 #endif
 
 static int
@@ -257,7 +248,7 @@ machine_crash_setup(void)
 	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
 		return ret;
 #ifdef CONFIG_SYSCTL
-	register_sysctl_table(sys_table);
+	register_sysctl("kernel", kdump_ctl_table);
 #endif
 	return 0;
 }

diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c

@@ -853,7 +853,7 @@ valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
 	 * /dev/mem reads and writes use copy_to_user(), which implicitly
 	 * uses a granule-sized kernel identity mapping.  It's really
 	 * only safe to do this for regions in kern_memmap.  For more
-	 * details, see Documentation/ia64/aliasing.rst.
+	 * details, see Documentation/arch/ia64/aliasing.rst.
 	 */
 	attr = kern_mem_attribute(phys_addr, size);
 	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)

diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S

@@ -28,7 +28,7 @@
 #include <asm/native/inst.h>
 
 /*
- * See Documentation/ia64/fsys.rst for details on fsyscalls.
+ * See Documentation/arch/ia64/fsys.rst for details on fsyscalls.
  *
  * On entry to an fsyscall handler:
  *   r10 = 0 (i.e., defaults to "successful syscall return")

diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c

@@ -485,19 +485,19 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 	return 0;
 }
 
-static inline int
+static inline bool
 in_init (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
+	return within_module_init(addr, mod);
 }
 
-static inline int
+static inline bool
 in_core (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
+	return within_module_core(addr, mod);
 }
 
-static inline int
+static inline bool
 is_internal (const struct module *mod, uint64_t value)
 {
 	return in_init(mod, value) || in_core(mod, value);
@@ -677,7 +677,8 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		break;
 
 	      case RV_BDREL:
-		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
+		val -= (uint64_t) (in_init(mod, val) ? mod->mem[MOD_INIT_TEXT].base :
+				   mod->mem[MOD_TEXT].base);
 		break;
 
 	      case RV_LTV:
@@ -812,15 +813,18 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * addresses have been selected...
 		 */
 		uint64_t gp;
-		if (mod->core_layout.size > MAX_LTOFF)
+		struct module_memory *mod_mem;
+
+		mod_mem = &mod->mem[MOD_DATA];
+		if (mod_mem->size > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = mod->core_layout.size - MAX_LTOFF / 2;
+			gp = mod_mem->size - MAX_LTOFF / 2;
 		else
-			gp = mod->core_layout.size / 2;
-		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
+			gp = mod_mem->size / 2;
+		gp = (uint64_t) mod_mem->base + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}

diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c

@@ -201,7 +201,7 @@ __setup("nohalt", nohalt_setup);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
+static inline void __noreturn play_dead(void)
 {
 	unsigned int this_cpu = smp_processor_id();
 
@@ -219,13 +219,13 @@ static inline void play_dead(void)
 	BUG();
 }
 #else
-static inline void play_dead(void)
+static inline void __noreturn play_dead(void)
 {
 	BUG();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
 {
 	play_dead();
 }

diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c

@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
  * 'data' contains an integer that corresponds to the feature we're
  * testing
  */
-static int proc_salinfo_show(struct seq_file *m, void *v)
+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
 {
 	unsigned long data = (unsigned long)v;
 	seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c

@@ -220,11 +220,11 @@ kdump_smp_send_init(void)
  * Called with preemption disabled.
  */
 void
-smp_send_reschedule (int cpu)
+arch_smp_send_reschedule (int cpu)
 {
 	ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 /*
  * Called with preemption disabled.

diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c

@@ -77,7 +77,7 @@ skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
-static inline void
+static inline __init void
 alloc_per_cpu_data(void)
 {
 	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c

@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 
 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		p4d = p4d_offset(pgd, addr);
+		p4d = p4d_offset(pgd, taddr);
 		if (p4d_present(*p4d)) {
 			pud = pud_offset(p4d, taddr);
 			if (pud_present(*pud)) {
@@ -170,7 +170,7 @@ static int __init hugetlb_setup_sz(char *str)
 	size = memparse(str, &str);
 	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
 	    size <= PAGE_SIZE ||
-	    size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
+	    size > (1UL << PAGE_SHIFT << MAX_ORDER)) {
 		printk(KERN_WARNING "Invalid huge page size specified\n");
 		return 1;
 	}

diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c

@@ -43,7 +43,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
 	/*
 	 * For things in kern_memmap, we must use the same attribute
 	 * as the rest of the kernel.  For more details, see
-	 * Documentation/ia64/aliasing.rst.
+	 * Documentation/arch/ia64/aliasing.rst.
 	 */
 	attr = kern_mem_attribute(phys_addr, size);
 	if (attr & EFI_MEMORY_WB)

diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c

@@ -448,7 +448,7 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
 		return -ENOSYS;
 
 	/*
-	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.rst
+	 * Avoid attribute aliasing.  See Documentation/arch/ia64/aliasing.rst
 	 * for more details.
 	 */
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))