author		Tom Rini <trini@konsulko.com>	2019-12-18 07:20:19 -0500
committer	Tom Rini <trini@konsulko.com>	2019-12-18 07:20:19 -0500
commit		c0912f9bbfb26dd03d189953678691b799d35b6e (patch)
tree		f879600cd26b8d4678a174854b623941e5dc2ada /arch/x86/cpu/intel_common
parent		533c9f5714bdba79dc6f2629284d4c1a08a611d1 (diff)
parent		a1d6dc3f84071f05574044f337dbdca70fae495d (diff)
Merge branch 'next' of https://gitlab.denx.de/u-boot/custodians/u-boot-x86 into next
- Various x86 common code updated for TPL/SPL
- I2C designware driver updated for PCI
- ICH SPI driver updated to support Apollo Lake
- Add Intel FSP2 base support
- Intel Apollo Lake platform-specific driver support
- Add a new board Google Chromebook Coral
Diffstat (limited to 'arch/x86/cpu/intel_common')
-rw-r--r--	arch/x86/cpu/intel_common/Makefile	10
-rw-r--r--	arch/x86/cpu/intel_common/car2.S	448
-rw-r--r--	arch/x86/cpu/intel_common/car2_uninit.S	87
-rw-r--r--	arch/x86/cpu/intel_common/fast_spi.c	73
-rw-r--r--	arch/x86/cpu/intel_common/lpss.c	44
5 files changed, 662 insertions, 0 deletions
diff --git a/arch/x86/cpu/intel_common/Makefile b/arch/x86/cpu/intel_common/Makefile
index 07f27c29ec..cc4e1c962b 100644
--- a/arch/x86/cpu/intel_common/Makefile
+++ b/arch/x86/cpu/intel_common/Makefile
@@ -8,8 +8,18 @@ obj-$(CONFIG_$(SPL_TPL_)X86_32BIT_INIT) += me_status.o
 obj-$(CONFIG_$(SPL_TPL_)X86_32BIT_INIT) += report_platform.o
 obj-$(CONFIG_$(SPL_TPL_)X86_32BIT_INIT) += mrc.o
 endif
+
+ifdef CONFIG_INTEL_CAR_CQOS
+obj-$(CONFIG_TPL_BUILD) += car2.o
+ifndef CONFIG_SPL_BUILD
+obj-y += car2_uninit.o
+endif
+endif
+
 obj-y += cpu.o
+obj-y += fast_spi.o
 obj-y += lpc.o
+obj-y += lpss.o
 ifndef CONFIG_TARGET_EFI_APP
 obj-$(CONFIG_$(SPL_TPL_)X86_32BIT_INIT) += microcode.o
 ifndef CONFIG_$(SPL_)X86_64
diff --git a/arch/x86/cpu/intel_common/car2.S b/arch/x86/cpu/intel_common/car2.S
new file mode 100644
index 0000000000..086f987477
--- /dev/null
+++ b/arch/x86/cpu/intel_common/car2.S
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file was modified from the coreboot version.
+ *
+ * Copyright (C) 2015-2016 Intel Corp.
+ */
+
+#include <config.h>
+#include <asm/msr-index.h>
+#include <asm/mtrr.h>
+#include <asm/post.h>
+#include <asm/processor.h>
+#include <asm/processor-flags.h>
+
+#define KiB			1024
+
+#define IS_POWER_OF_2(x)	(!((x) & ((x) - 1)))
+
+.global car_init
+car_init:
+	post_code(POST_CAR_START)
+
+	/*
+	 * Use the MTRR default type MSR as a proxy for detecting INIT#.
+	 * Reset the system if any known bits are set in that MSR. That is
+	 * an indication of the CPU not being properly reset.
+	 */
+check_for_clean_reset:
+	mov	$MTRR_DEF_TYPE_MSR, %ecx
+	rdmsr
+	and	$(MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN), %eax
+	cmp	$0, %eax
+	jz	no_reset
+	/* perform warm reset */
+	movw	$IO_PORT_RESET, %dx
+	movb	$(SYS_RST | RST_CPU), %al
+	outb	%al, %dx
+
+no_reset:
+	post_code(POST_CAR_SIPI)
+
+	/* Clear/disable fixed MTRRs */
+	mov	$fixed_mtrr_list_size, %ebx
+	xor	%eax, %eax
+	xor	%edx, %edx
+
+clear_fixed_mtrr:
+	add	$-2, %ebx
+	movzwl	fixed_mtrr_list(%ebx), %ecx
+	wrmsr
+	jnz	clear_fixed_mtrr
+
+	post_code(POST_CAR_MTRR)
+
+	/* Figure out how many MTRRs we have, and clear them out */
+	mov	$MTRR_CAP_MSR, %ecx
+	rdmsr
+	movzb	%al, %ebx		/* Number of variable MTRRs */
+	mov	$MTRR_PHYS_BASE_MSR(0), %ecx
+	xor	%eax, %eax
+	xor	%edx, %edx
+
+clear_var_mtrr:
+	wrmsr
+	inc	%ecx
+	wrmsr
+	inc	%ecx
+	dec	%ebx
+	jnz	clear_var_mtrr
+
+	post_code(POST_CAR_UNCACHEABLE)
+
+	/* Configure default memory type to uncacheable (UC) */
+	mov	$MTRR_DEF_TYPE_MSR, %ecx
+	rdmsr
+	/* Clear enable bits and set default type to UC */
+	and	$~(MTRR_DEF_TYPE_MASK | MTRR_DEF_TYPE_EN | \
+		   MTRR_DEF_TYPE_FIX_EN), %eax
+	wrmsr
+
+	/*
+	 * Configure MTRR_PHYS_MASK_HIGH for proper addressing above 4GB
+	 * based on the physical address size supported by this processor.
+	 * This is based on a read from CPUID EAX = 80000008h, EAX bits [7:0]
+	 *
+	 * Examples:
+	 *  MTRR_PHYS_MASK_HIGH = 0000000Fh  for 36-bit addressing
+	 *  MTRR_PHYS_MASK_HIGH = 000000FFh  for 40-bit addressing
+	 */
+
+	movl	$0x80000008, %eax	/* Address sizes leaf */
+	cpuid
+	sub	$32, %al
+	movzx	%al, %eax
+	xorl	%esi, %esi
+	bts	%eax, %esi
+	dec	%esi			/* esi <- MTRR_PHYS_MASK_HIGH */
+
+	post_code(POST_CAR_BASE_ADDRESS)
+
+#if IS_POWER_OF_2(CONFIG_DCACHE_RAM_SIZE)
+	/* Configure CAR region as write-back (WB) */
+	mov	$MTRR_PHYS_BASE_MSR(0), %ecx
+	mov	$CONFIG_DCACHE_RAM_BASE, %eax
+	or	$MTRR_TYPE_WRBACK, %eax
+	xor	%edx, %edx
+	wrmsr
+
+	/* Configure the MTRR mask for the size region */
+	mov	$MTRR_PHYS_MASK_MSR(0), %ecx
+	mov	$CONFIG_DCACHE_RAM_SIZE, %eax	/* size mask */
+	dec	%eax
+	not	%eax
+	or	$MTRR_PHYS_MASK_VALID, %eax
+	movl	%esi, %edx	/* edx <- MTRR_PHYS_MASK_HIGH */
+	wrmsr
+#elif (CONFIG_DCACHE_RAM_SIZE == 768 * KiB)	/* 768 KiB */
+	/* Configure CAR region as write-back (WB) */
+	mov	$MTRR_PHYS_BASE_MSR(0), %ecx
+	mov	$CONFIG_DCACHE_RAM_BASE, %eax
+	or	$MTRR_TYPE_WRBACK, %eax
+	xor	%edx, %edx
+	wrmsr
+
+	mov	$MTRR_PHYS_MASK_MSR(0), %ecx
+	mov	$(512 * KiB), %eax	/* size mask */
+	dec	%eax
+	not	%eax
+	or	$MTRR_PHYS_MASK_VALID, %eax
+	movl	%esi, %edx	/* edx <- MTRR_PHYS_MASK_HIGH */
+	wrmsr
+
+	mov	$MTRR_PHYS_BASE_MSR(1), %ecx
+	mov	$(CONFIG_DCACHE_RAM_BASE + 512 * KiB), %eax
+	or	$MTRR_TYPE_WRBACK, %eax
+	xor	%edx, %edx
+	wrmsr
+
+	mov	$MTRR_PHYS_MASK_MSR(1), %ecx
+	mov	$(256 * KiB), %eax	/* size mask */
+	dec	%eax
+	not	%eax
+	or	$MTRR_PHYS_MASK_VALID, %eax
+	movl	%esi, %edx	/* edx <- MTRR_PHYS_MASK_HIGH */
+	wrmsr
+#else
+#error "DCACHE_RAM_SIZE is not a power of 2 and setup code is missing"
+#endif
+	post_code(POST_CAR_FILL)
+
+	/* Enable variable MTRRs */
+	mov	$MTRR_DEF_TYPE_MSR, %ecx
+	rdmsr
+	or	$MTRR_DEF_TYPE_EN, %eax
+	wrmsr
+
+	/* Enable caching */
+	mov	%cr0, %eax
+	and	$~(X86_CR0_CD | X86_CR0_NW), %eax
+	invd
+	mov	%eax, %cr0
+
+#if IS_ENABLED(CONFIG_INTEL_CAR_NEM)
+	jmp	car_nem
+#elif IS_ENABLED(CONFIG_INTEL_CAR_CQOS)
+	jmp	car_cqos
+#elif IS_ENABLED(CONFIG_INTEL_CAR_NEM_ENHANCED)
+	jmp	car_nem_enhanced
+#else
+#error "No CAR mechanism selected"
+#endif
+	jmp	car_init_ret
+
+fixed_mtrr_list:
+	.word	MTRR_FIX_64K_00000_MSR
+	.word	MTRR_FIX_16K_80000_MSR
+	.word	MTRR_FIX_16K_A0000_MSR
+	.word	MTRR_FIX_4K_C0000_MSR
+	.word	MTRR_FIX_4K_C8000_MSR
+	.word	MTRR_FIX_4K_D0000_MSR
+	.word	MTRR_FIX_4K_D8000_MSR
+	.word	MTRR_FIX_4K_E0000_MSR
+	.word	MTRR_FIX_4K_E8000_MSR
+	.word	MTRR_FIX_4K_F0000_MSR
+	.word	MTRR_FIX_4K_F8000_MSR
+fixed_mtrr_list_size = . - fixed_mtrr_list
+
+#if IS_ENABLED(CONFIG_INTEL_CAR_NEM)
+.global car_nem
+car_nem:
+	/* Disable cache eviction (setup stage) */
+	mov	$MSR_EVICT_CTL, %ecx
+	rdmsr
+	or	$0x1, %eax
+	wrmsr
+
+	post_code(0x26)
+
+	/* Clear the cache memory region. This will also fill up the cache */
+	movl	$CONFIG_DCACHE_RAM_BASE, %edi
+	movl	$CONFIG_DCACHE_RAM_SIZE, %ecx
+	shr	$0x02, %ecx
+	xor	%eax, %eax
+	cld
+	rep	stosl
+
+	post_code(0x27)
+
+	/* Disable cache eviction (run stage) */
+	mov	$MSR_EVICT_CTL, %ecx
+	rdmsr
+	or	$0x2, %eax
+	wrmsr
+
+	post_code(0x28)
+
+	jmp	car_init_ret
+
+#elif IS_ENABLED(CONFIG_INTEL_CAR_CQOS)
+.global car_cqos
+car_cqos:
+	/*
+	 * Create CBM_LEN_MASK based on CBM_LEN
+	 * Get CPUID.(EAX=10H, ECX=2H):EAX.CBM_LEN[bits 4:0]
+	 */
+	mov	$0x10, %eax
+	mov	$0x2, %ecx
+	cpuid
+	and	$0x1f, %eax
+	add	$1, %al
+
+	mov	$1, %ebx
+	mov	%al, %cl
+	shl	%cl, %ebx
+	sub	$1, %ebx
+
+	/* Store the CBM_LEN_MASK in mm3 for later use */
+	movd	%ebx, %mm3
+
+	/*
+	 * Disable both L1 and L2 prefetchers. For a yet-to-be-understood
+	 * reason, prefetchers slow down filling the cache with rep stos in
+	 * CQOS mode.
+	 */
+	mov	$MSR_PREFETCH_CTL, %ecx
+	rdmsr
+	or	$(PREFETCH_L1_DISABLE | PREFETCH_L2_DISABLE), %eax
+	wrmsr
+
+#if (CONFIG_DCACHE_RAM_SIZE == CONFIG_L2_CACHE_SIZE)
+/*
+ * If the CAR size is set to the full L2 size, the mask is calculated as all
+ * zeroes. This is not supported by the CPU/uCode.
+ */
+#error "CQOS CAR may not use whole L2 cache area"
+#endif
+
+	/* Calculate how many bits are to be used for CAR */
+	xor	%edx, %edx
+	mov	$CONFIG_DCACHE_RAM_SIZE, %eax		/* dividend */
+	mov	$CONFIG_CACHE_QOS_SIZE_PER_BIT, %ecx	/* divisor */
+	div	%ecx		/* result is in eax */
+	mov	%eax, %ecx	/* save to ecx */
+	mov	$1, %ebx
+	shl	%cl, %ebx
+	sub	$1, %ebx	/* resulting mask is in ebx */
+
+	/* Set this mask for initial cache fill */
+	mov	$MSR_L2_QOS_MASK(0), %ecx
+	rdmsr
+	mov	%ebx, %eax
+	wrmsr
+
+	/* Set CLOS selector to 0 */
+	mov	$MSR_IA32_PQR_ASSOC, %ecx
+	rdmsr
+	and	$~MSR_IA32_PQR_ASSOC_MASK, %edx	/* select mask 0 */
+	wrmsr
+
+	/* We will need to block the CAR region from evicts */
+	mov	$MSR_L2_QOS_MASK(1), %ecx
+	rdmsr
+	/* Invert bits that are to be used for cache */
+	mov	%ebx, %eax
+	xor	$~0, %eax	/* invert 32 bits */
+
+	/*
+	 * Use CBM_LEN_MASK stored in mm3 to set bits based on Capacity Bit
+	 * Mask Length.
+	 */
+	movd	%mm3, %ebx
+	and	%ebx, %eax
+	wrmsr
+
+	post_code(0x26)
+
+	/* Clear the cache memory region. This will also fill up the cache */
+	movl	$CONFIG_DCACHE_RAM_BASE, %edi
+	movl	$CONFIG_DCACHE_RAM_SIZE, %ecx
+	shr	$0x02, %ecx
+	xor	%eax, %eax
+	cld
+	rep	stosl
+
+	post_code(0x27)
+
+	/* Cache is populated. Use mask 1 that will block evicts */
+	mov	$MSR_IA32_PQR_ASSOC, %ecx
+	rdmsr
+	and	$~MSR_IA32_PQR_ASSOC_MASK, %edx	/* clear index bits first */
+	or	$1, %edx			/* select mask 1 */
+	wrmsr
+
+	/* Enable prefetchers */
+	mov	$MSR_PREFETCH_CTL, %ecx
+	rdmsr
+	and	$~(PREFETCH_L1_DISABLE | PREFETCH_L2_DISABLE), %eax
+	wrmsr
+
+	post_code(0x28)
+
+	jmp	car_init_ret
+
+#elif IS_ENABLED(CONFIG_INTEL_CAR_NEM_ENHANCED)
+.global car_nem_enhanced
+car_nem_enhanced:
+	/* Disable cache eviction (setup stage) */
+	mov	$MSR_EVICT_CTL, %ecx
+	rdmsr
+	or	$0x1, %eax
+	wrmsr
+	post_code(0x26)
+
+	/* Create n-way set associativity of cache */
+	xorl	%edi, %edi
+find_llc_subleaf:
+	movl	%edi, %ecx
+	movl	$0x04, %eax
+	cpuid
+	inc	%edi
+	and	$0xe0, %al	/* EAX[7:5] = Cache Level */
+	cmp	$0x60, %al	/* Check to see if it is LLC */
+	jnz	find_llc_subleaf
+
+	/*
+	 * Set MSR 0xC91 IA32_L3_MASK_1 = 0xE/0xFE/0xFFE/0xFFFE
+	 * for 4/8/12/16 ways of LLC
+	 */
+	shr	$22, %ebx
+	inc	%ebx
+	/* Calculate n-way associativity of LLC */
+	mov	%bl, %cl
+
+	/*
+	 * Maximize RO cacheability while locking the CAR into a single way,
+	 * since that particular way won't be a victim candidate for
+	 * evictions.
+	 * This is done after programming the LLC_WAY_MASK_1 MSR with the
+	 * desired LLC way, as mentioned below.
+	 *
+	 * Hence create Code and Data Size as per request
+	 * Code Size (RO) : Up to 16M
+	 * Data Size (RW) : Up to 256K
+	 */
+	movl	$0x01, %eax
+	/*
+	 * LLC Ways -> LLC_WAY_MASK_1:
+	 *  4: 0x000E
+	 *  8: 0x00FE
+	 * 12: 0x0FFE
+	 * 16: 0xFFFE
+	 *
+	 * These MSRs contain one bit per each way of LLC:
+	 * - If this bit is '0' - the way is protected from eviction
+	 * - If this bit is '1' - the way is not protected from eviction
+	 */
+	shl	%cl, %eax
+	subl	$0x02, %eax
+	movl	$MSR_IA32_L3_MASK_1, %ecx
+	xorl	%edx, %edx
+	wrmsr
+	/*
+	 * Set MSR 0xC92 IA32_L3_MASK_2 = 0x1
+	 *
+	 * For SKL SOC, data size remains 256K consistently.
+	 * Hence, create a 1-way associative cache for data.
+	 */
+	mov	$MSR_IA32_L3_MASK_2, %ecx
+	mov	$0x01, %eax
+	xorl	%edx, %edx
+	wrmsr
+	/*
+	 * Set MSR_IA32_PQR_ASSOC = 0x02
+	 *
+	 * Possible values:
+	 * 0: Default value, no way mask should be applied
+	 * 1: Apply way mask 1 to LLC
+	 * 2: Apply way mask 2 to LLC
+	 * 3: Shouldn't be used in NEM mode
+	 */
+	movl	$MSR_IA32_PQR_ASSOC, %ecx
+	movl	$0x02, %eax
+	xorl	%edx, %edx
+	wrmsr
+
+	movl	$CONFIG_DCACHE_RAM_BASE, %edi
+	movl	$CONFIG_DCACHE_RAM_SIZE, %ecx
+	shr	$0x02, %ecx
+	xor	%eax, %eax
+	cld
+	rep	stosl
+	/*
+	 * Set MSR_IA32_PQR_ASSOC = 0x01
+	 * At this stage we apply LLC_WAY_MASK_1 to the cache,
+	 * i.e. way 0 is protected from eviction.
+	 */
+	movl	$MSR_IA32_PQR_ASSOC, %ecx
+	movl	$0x01, %eax
+	xorl	%edx, %edx
+	wrmsr
+
+	post_code(0x27)
+	/*
+	 * Enable No-Eviction Mode Run State by setting
+	 * NO_EVICT_MODE MSR 2E0h bit [1] = '1'.
+	 */
+
+	movl	$MSR_EVICT_CTL, %ecx
+	rdmsr
+	orl	$0x02, %eax
+	wrmsr
+
+	post_code(0x28)
+
+	jmp	car_init_ret
+#endif
+
+#if CONFIG_IS_ENABLED(X86_16BIT_INIT)
+_dt_ucode_base_size:
+	/* These next two fields are filled in by binman */
+.globl ucode_base
+ucode_base:	/* Declared in microcode.h */
+	.long	0	/* microcode base */
+.globl ucode_size
+ucode_size:	/* Declared in microcode.h */
+	.long	0	/* microcode size */
+	.long	CONFIG_SYS_MONITOR_BASE	/* code region base */
+	.long	CONFIG_SYS_MONITOR_LEN	/* code region size */
+#endif
diff --git a/arch/x86/cpu/intel_common/car2_uninit.S b/arch/x86/cpu/intel_common/car2_uninit.S
new file mode 100644
index 0000000000..aba3a5381e
--- /dev/null
+++ b/arch/x86/cpu/intel_common/car2_uninit.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Intel Corp.
+ * Copyright 2019 Google LLC
+ * Taken from coreboot file exit_car.S
+ */
+
+#include <config.h>
+#include <asm/msr-index.h>
+#include <asm/mtrr.h>
+
+.text
+.global car_uninit
+car_uninit:
+
+	/*
+	 * Retrieve the return address from the stack, as it will get trashed
+	 * below if execution is using the cache-as-RAM stack.
+	 */
+	pop	%ebx
+
+	/* Disable MTRRs */
+	mov	$(MTRR_DEF_TYPE_MSR), %ecx
+	rdmsr
+	and	$(~(MTRR_DEF_TYPE_EN | MTRR_DEF_TYPE_FIX_EN)), %eax
+	wrmsr
+
+#ifdef CONFIG_INTEL_CAR_NEM
+.global car_nem_teardown
+car_nem_teardown:
+
+	/* invalidate cache contents */
+	invd
+
+	/* Knock down bit 1 then bit 0 of NEM control, not combining steps */
+	mov	$(MSR_EVICT_CTL), %ecx
+	rdmsr
+	and	$(~(1 << 1)), %eax
+	wrmsr
+	and	$(~(1 << 0)), %eax
+	wrmsr
+
+#elif IS_ENABLED(CONFIG_INTEL_CAR_CQOS)
+.global car_cqos_teardown
+car_cqos_teardown:
+
+	/* Go back to all-evicting mode, set both masks to all-1s */
+	mov	$MSR_L2_QOS_MASK(0), %ecx
+	rdmsr
+	mov	$~0, %al
+	wrmsr
+
+	mov	$MSR_L2_QOS_MASK(1), %ecx
+	rdmsr
+	mov	$~0, %al
+	wrmsr
+
+	/* Reset CLOS selector to 0 */
+	mov	$MSR_IA32_PQR_ASSOC, %ecx
+	rdmsr
+	and	$~MSR_IA32_PQR_ASSOC_MASK, %edx
+	wrmsr
+
+#elif IS_ENABLED(CONFIG_INTEL_CAR_NEM_ENHANCED)
+.global car_nem_enhanced_teardown
+car_nem_enhanced_teardown:
+
+	/* invalidate cache contents */
+	invd
+
+	/* Knock down bit 1 then bit 0 of NEM control, not combining steps */
+	mov	$(MSR_EVICT_CTL), %ecx
+	rdmsr
+	and	$(~(1 << 1)), %eax
+	wrmsr
+	and	$(~(1 << 0)), %eax
+	wrmsr
+
+	/* Reset CLOS selector to 0 */
+	mov	$MSR_IA32_PQR_ASSOC, %ecx
+	rdmsr
+	and	$~MSR_IA32_PQR_ASSOC_MASK, %edx
+	wrmsr
+#endif
+
+	/* Return to caller */
+	jmp	*%ebx
diff --git a/arch/x86/cpu/intel_common/fast_spi.c b/arch/x86/cpu/intel_common/fast_spi.c
new file mode 100644
index 0000000000..a6e3d0a5bf
--- /dev/null
+++ b/arch/x86/cpu/intel_common/fast_spi.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/cpu_common.h>
+#include <asm/fast_spi.h>
+#include <asm/pci.h>
+
+/*
+ * Returns bios_start and fills in the size of the BIOS region.
+ */
+static ulong fast_spi_get_bios_region(struct fast_spi_regs *regs,
+				      uint *bios_size)
+{
+	ulong bios_start, bios_end;
+
+	/*
+	 * BIOS_BFPREG provides info about the BIOS-Flash Primary Region base
+	 * and limit. The base and limit fields are in units of 4K.
+	 */
+	u32 val = readl(&regs->bfp);
+
+	bios_start = (val & SPIBAR_BFPREG_PRB_MASK) << 12;
+	bios_end = (((val & SPIBAR_BFPREG_PRL_MASK) >>
+		     SPIBAR_BFPREG_PRL_SHIFT) + 1) << 12;
+	*bios_size = bios_end - bios_start;
+
+	return bios_start;
+}
+
+int fast_spi_get_bios_mmap(pci_dev_t pdev, ulong *map_basep, uint *map_sizep,
+			   uint *offsetp)
+{
+	struct fast_spi_regs *regs;
+	ulong bar, base, mmio_base;
+
+	/* Special case to find the mapping without probing the device */
+	pci_x86_read_config(pdev, PCI_BASE_ADDRESS_0, &bar, PCI_SIZE_32);
+	mmio_base = bar & PCI_BASE_ADDRESS_MEM_MASK;
+	regs = (struct fast_spi_regs *)mmio_base;
+	base = fast_spi_get_bios_region(regs, map_sizep);
+	*map_basep = (u32)-*map_sizep - base;
+	*offsetp = base;
+
+	return 0;
+}
+
+int fast_spi_early_init(pci_dev_t pdev, ulong mmio_base)
+{
+	/* Program temporary BAR for SPI */
+	pci_x86_write_config(pdev, PCI_BASE_ADDRESS_0,
+			     mmio_base | PCI_BASE_ADDRESS_SPACE_MEMORY,
+			     PCI_SIZE_32);
+
+	/* Enable Bus Master and MMIO Space */
+	pci_x86_clrset_config(pdev, PCI_COMMAND, 0, PCI_COMMAND_MASTER |
+			      PCI_COMMAND_MEMORY, PCI_SIZE_8);
+
+	/*
+	 * Disable the BIOS write protect so write commands are allowed.
+	 * Enable prefetching and caching.
+	 */
+	pci_x86_clrset_config(pdev, SPIBAR_BIOS_CONTROL,
+			      SPIBAR_BIOS_CONTROL_EISS |
+			      SPIBAR_BIOS_CONTROL_CACHE_DISABLE,
+			      SPIBAR_BIOS_CONTROL_WPD |
+			      SPIBAR_BIOS_CONTROL_PREFETCH_ENABLE, PCI_SIZE_8);
+
+	return 0;
+}
diff --git a/arch/x86/cpu/intel_common/lpss.c b/arch/x86/cpu/intel_common/lpss.c
new file mode 100644
index 0000000000..26a2d2d1e3
--- /dev/null
+++ b/arch/x86/cpu/intel_common/lpss.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Special driver to handle of-platdata
+ *
+ * Copyright 2019 Google LLC
+ *
+ * Some code from coreboot lpss.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <pci.h>
+#include <asm/io.h>
+#include <asm/lpss.h>
+
+enum {
+	LPSS_RESET_CTL_REG	= 0x204,
+
+	/*
+	 * Bits 1:0 control the LPSS controller reset:
+	 *
+	 * 00    -> LPSS host controller is in reset (reset asserted)
+	 * 01/10 -> Reserved
+	 * 11    -> LPSS host controller is NOT in reset (reset released)
+	 */
+	LPSS_CNT_RST_RELEASE	= 3,
+
+	/* Power management control and status register */
+	PME_CTRL_STATUS		= 0x84,
+
+	/* Bits 1:0 are the power state, controlling the D0 and D3 states */
+	POWER_STATE_MASK	= 3,
+};
+
+/* Take controller out of reset */
+void lpss_reset_release(void *regs)
+{
+	writel(LPSS_CNT_RST_RELEASE, regs + LPSS_RESET_CTL_REG);
+}
+
+void lpss_set_power_state(struct udevice *dev, enum lpss_pwr_state state)
+{
+	dm_pci_clrset_config8(dev, PME_CTRL_STATUS, POWER_STATE_MASK, state);
+}
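A note on the mask arithmetic in car2.S above: a variable MTRR describes a power-of-two region, so the value written to the PHYS_MASK MSR is essentially ~(size - 1), truncated to the CPU's implemented physical address width (from CPUID leaf 0x80000008) and OR'd with the valid bit. The C sketch below only illustrates that arithmetic; the function and macro names here are made up for the example and are not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the MTRR mask valid bit (bit 11) */
#define EXAMPLE_MTRR_PHYS_MASK_VALID	(1ULL << 11)

/* Mirrors the "dec; not; or" sequence in car2.S for a power-of-two size */
static uint64_t example_mtrr_size_mask(uint64_t size, unsigned int phys_bits)
{
	uint64_t addr_mask = (1ULL << phys_bits) - 1;	/* implemented bits */

	return (~(size - 1) & addr_mask) | EXAMPLE_MTRR_PHYS_MASK_VALID;
}

int main(void)
{
	/* 256 KiB CAR region on a CPU reporting 39 physical address bits */
	printf("mask = 0x%llx\n",
	       (unsigned long long)example_mtrr_size_mask(256 * 1024, 39));

	return 0;
}

For the 256 KiB example this prints 0x7ffffc0800: the low word goes into the mask MSR and the high word (0x7f) matches the MTRR_PHYS_MASK_HIGH value the assembly derives from CPUID.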
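For fast_spi.c above: BFPREG encodes the BIOS region base and limit in 4 KiB units, and fast_spi_get_bios_mmap() then reports a CPU address for the region such that it ends at the 4 GiB boundary (*map_basep = 4 GiB - size - base). A self-contained sketch of that address math follows; the 15-bit field widths and the helper names are assumptions made for the example, not definitions taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed 15-bit base/limit fields, in 4 KiB units, as used by fast_spi.c */
#define EX_BFPREG_PRB_MASK	0x7fffU
#define EX_BFPREG_PRL_SHIFT	16
#define EX_BFPREG_PRL_MASK	(0x7fffU << EX_BFPREG_PRL_SHIFT)

static void example_decode_bios_region(uint32_t bfpreg, uint32_t *basep,
					uint32_t *sizep, uint32_t *map_basep)
{
	uint32_t base = (bfpreg & EX_BFPREG_PRB_MASK) << 12;
	uint32_t end = (((bfpreg & EX_BFPREG_PRL_MASK) >>
			 EX_BFPREG_PRL_SHIFT) + 1) << 12;

	*basep = base;
	*sizep = end - base;
	/* Map the region so that it ends at the 4 GiB boundary */
	*map_basep = (uint32_t)-*sizep - base;
}

int main(void)
{
	uint32_t base, size, map_base;

	/* Example: base 0x1000 (4 KiB units = 16 MiB), limit 0x1fff */
	example_decode_bios_region(0x1fff1000, &base, &size, &map_base);
	printf("base=0x%x size=0x%x map_base=0x%x\n", base, size, map_base);

	return 0;
}

With this example value the region is 16 MiB starting at flash offset 16 MiB, and map_base comes out as 0xfe000000, i.e. the window sits just below 4 GiB where the x86 reset vector expects the BIOS image.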
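The lpss.c helpers above are intended to be called from LPSS peripheral drivers (I2C, UART, SPI) once the controller's MMIO base is known: bring the device to D0, then release the per-controller reset. Below is a hedged usage sketch, assuming a hypothetical driver whose platform data already carries the MMIO base; the structure, probe function and the STATE_D0 enumerator name are illustrative assumptions rather than APIs defined by this patch.

#include <common.h>
#include <dm.h>
#include <asm/lpss.h>

/* Hypothetical platform data holding the controller's MMIO base */
struct lpss_example_plat {
	void *regs;
};

/* Hypothetical probe step for an LPSS peripheral driver */
static int lpss_example_probe(struct udevice *dev)
{
	struct lpss_example_plat *plat = dev_get_platdata(dev);

	/* Power the controller up (D0), then take it out of reset */
	lpss_set_power_state(dev, STATE_D0);	/* enum value name assumed */
	lpss_reset_release(plat->regs);

	return 0;
}

Ordering matters here: lpss_set_power_state() touches PCI config space (PME_CTRL_STATUS), which works while the controller is still held in reset, whereas the MMIO reset-control register is only useful once the BAR is programmed, so the reset release is done last.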