Diffstat (limited to 'drivers')
-rw-r--r--   drivers/pci/controller/pci-aardvark.c |   3
-rw-r--r--   drivers/pci/msi/msi.c                 |   2
-rw-r--r--   drivers/pci/pci-driver.c              |  30
-rw-r--r--   drivers/pci/pci-sysfs.c               | 108
-rw-r--r--   drivers/pci/pci.c                     |  51
-rw-r--r--   drivers/pci/pci.h                     |  18
-rw-r--r--   drivers/pci/pcie/aspm.c               | 258
-rw-r--r--   drivers/pci/pcie/dpc.c                |  15
-rw-r--r--   drivers/pci/pcie/ptm.c                | 300
-rw-r--r--   drivers/pci/probe.c                   |  13
-rw-r--r--   drivers/pci/quirks.c                  |  36
-rw-r--r--   drivers/pci/setup-bus.c               | 290
-rw-r--r--   drivers/pci/setup-res.c               |  11
13 files changed, 729 insertions, 406 deletions
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 966c8b48bd96..7cc51cfb8a13 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -33,6 +33,7 @@
 #define PCIE_CORE_DEV_ID_REG		0x0
 #define PCIE_CORE_CMD_STATUS_REG	0x4
 #define PCIE_CORE_DEV_REV_REG		0x8
+#define PCIE_CORE_SSDEV_ID_REG		0x2c
 #define PCIE_CORE_PCIEXP_CAP		0xc0
 #define PCIE_CORE_PCIERR_CAP		0x100
 #define PCIE_CORE_ERR_CAPCTL_REG	0x118
@@ -1077,6 +1078,8 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
 	/* Indicates supports for Completion Retry Status */
 	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
 
+	bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
+	bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
 	bridge->has_pcie = true;
 	bridge->data = pcie;
 	bridge->ops = &advk_pci_bridge_emul_ops;
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 9037a7827eca..fdd2ec09651e 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -526,7 +526,7 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
 		desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
 					       !desc.pci.msi_attrib.is_virtual;
 
-		if (!desc.pci.msi_attrib.can_mask) {
+		if (desc.pci.msi_attrib.can_mask) {
 			addr = pci_msix_desc_addr(&desc);
 			desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 		}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 49238ddd39ee..107d77f3c846 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev)
 
 	pci_dev->skip_bus_pm = false;
 
+	/*
+	 * Disabling PTM allows some systems, e.g., Intel mobile chips
+	 * since Coffee Lake, to enter a lower-power PM state.
+	 */
+	pci_suspend_ptm(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_suspend(dev, PMSG_SUSPEND);
 
@@ -867,20 +873,15 @@ static int pci_pm_suspend_noirq(struct device *dev)
 		}
 	}
 
-	if (pci_dev->skip_bus_pm) {
+	if (!pci_dev->state_saved) {
+		pci_save_state(pci_dev);
+
 		/*
-		 * Either the device is a bridge with a child in D0 below it, or
-		 * the function is running for the second time in a row without
-		 * going through full resume, which is possible only during
-		 * suspend-to-idle in a spurious wakeup case. The device should
-		 * be in D0 at this point, but if it is a bridge, it may be
-		 * necessary to save its state.
+		 * If the device is a bridge with a child in D0 below it,
+		 * it needs to stay in D0, so check skip_bus_pm to avoid
+		 * putting it into a low-power state in that case.
 		 */
-		if (!pci_dev->state_saved)
-			pci_save_state(pci_dev);
-	} else if (!pci_dev->state_saved) {
-		pci_save_state(pci_dev);
-		if (pci_power_manageable(pci_dev))
+		if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
 			pci_prepare_to_sleep(pci_dev);
 	}
 
@@ -987,6 +988,8 @@ static int pci_pm_resume(struct device *dev)
 	if (pci_dev->state_saved)
 		pci_restore_standard_config(pci_dev);
 
+	pci_resume_ptm(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume(dev);
 
@@ -1274,6 +1277,8 @@ static int pci_pm_runtime_suspend(struct device *dev)
 	pci_power_t prev = pci_dev->current_state;
 	int error;
 
+	pci_suspend_ptm(pci_dev);
+
 	/*
 	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
 	 * but it may go to D3cold when the bridge above it runtime suspends.
@@ -1335,6 +1340,7 @@ static int pci_pm_runtime_resume(struct device *dev)
 	 * D3cold when the bridge above it runtime suspended.
 	 */
 	pci_pm_default_resume_early(pci_dev);
+	pci_resume_ptm(pci_dev);
 
 	if (!pci_dev->driver)
 		return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fc804e08e3cb..0a2eeb82cebd 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/msi.h>
 #include <linux/of.h>
+#include <linux/aperture.h>
 #include "pci.h"
 
 static int sysfs_initialized;	/* = 0 */
@@ -1373,6 +1374,112 @@ static const struct attribute_group pci_dev_reset_attr_group = {
 	.is_visible = pci_dev_reset_attr_is_visible,
 };
 
+#define pci_dev_resource_resize_attr(n)	\
+static ssize_t resource##n##_resize_show(struct device *dev,	\
+					 struct device_attribute *attr,	\
+					 char * buf)	\
+{	\
+	struct pci_dev *pdev = to_pci_dev(dev);	\
+	ssize_t ret;	\
+	\
+	pci_config_pm_runtime_get(pdev);	\
+	\
+	ret = sysfs_emit(buf, "%016llx\n",	\
+			 (u64)pci_rebar_get_possible_sizes(pdev, n));	\
+	\
+	pci_config_pm_runtime_put(pdev);	\
+	\
+	return ret;	\
+}	\
+	\
+static ssize_t resource##n##_resize_store(struct device *dev,	\
+					  struct device_attribute *attr,\
+					  const char *buf, size_t count)\
+{	\
+	struct pci_dev *pdev = to_pci_dev(dev);	\
+	unsigned long size, flags;	\
+	int ret, i;	\
+	u16 cmd;	\
+	\
+	if (kstrtoul(buf, 0, &size) < 0)	\
+		return -EINVAL;	\
+	\
+	device_lock(dev);	\
+	if (dev->driver) {	\
+		ret = -EBUSY;	\
+		goto unlock;	\
+	}	\
+	\
+	pci_config_pm_runtime_get(pdev);	\
+	\
+	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {	\
+		ret = aperture_remove_conflicting_pci_devices(pdev,	\
+				"resourceN_resize");	\
+		if (ret)	\
+			goto pm_put;	\
+	}	\
+	\
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);	\
+	pci_write_config_word(pdev, PCI_COMMAND,	\
+			      cmd & ~PCI_COMMAND_MEMORY);	\
+	\
+	flags = pci_resource_flags(pdev, n);	\
+	\
+	pci_remove_resource_files(pdev);	\
+	\
+	for (i = 0; i < PCI_STD_NUM_BARS; i++) {	\
+		if (pci_resource_len(pdev, i) &&	\
+		    pci_resource_flags(pdev, i) == flags)	\
+			pci_release_resource(pdev, i);	\
+	}	\
+	\
+	ret = pci_resize_resource(pdev, n, size);	\
+	\
+	pci_assign_unassigned_bus_resources(pdev->bus);	\
+	\
+	if (pci_create_resource_files(pdev))	\
+		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
+	\
+	pci_write_config_word(pdev, PCI_COMMAND, cmd);	\
+pm_put:	\
+	pci_config_pm_runtime_put(pdev);	\
+unlock:	\
+	device_unlock(dev);	\
+	\
+	return ret ? ret : count;	\
+}	\
+static DEVICE_ATTR_RW(resource##n##_resize)
+
+pci_dev_resource_resize_attr(0);
+pci_dev_resource_resize_attr(1);
+pci_dev_resource_resize_attr(2);
+pci_dev_resource_resize_attr(3);
+pci_dev_resource_resize_attr(4);
+pci_dev_resource_resize_attr(5);
+
+static struct attribute *resource_resize_attrs[] = {
+	&dev_attr_resource0_resize.attr,
+	&dev_attr_resource1_resize.attr,
+	&dev_attr_resource2_resize.attr,
+	&dev_attr_resource3_resize.attr,
+	&dev_attr_resource4_resize.attr,
+	&dev_attr_resource5_resize.attr,
+	NULL,
+};
+
+static umode_t resource_resize_is_visible(struct kobject *kobj,
+					  struct attribute *a, int n)
+{
+	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+	return pci_rebar_get_current_size(pdev, n) < 0 ?
+		0 : a->mode;
+}
+
+static const struct attribute_group pci_dev_resource_resize_group = {
+	.attrs = resource_resize_attrs,
+	.is_visible = resource_resize_is_visible,
+};
+
 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
 {
 	if (!sysfs_initialized)
@@ -1494,6 +1601,7 @@ const struct attribute_group *pci_dev_groups[] = {
 #ifdef CONFIG_ACPI
 	&pci_dev_acpi_attr_group,
 #endif
+	&pci_dev_resource_resize_group,
 	NULL,
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 95bc329e74c0..2127aba3550b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,13 +66,15 @@ struct pci_pme_device {
 
 static void pci_dev_d3_sleep(struct pci_dev *dev)
 {
-	unsigned int delay = dev->d3hot_delay;
-
-	if (delay < pci_pm_d3hot_delay)
-		delay = pci_pm_d3hot_delay;
-
-	if (delay)
-		msleep(delay);
+	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
+	unsigned int upper;
+
+	if (delay_ms) {
+		/* Use a 20% upper bound, 1ms minimum */
+		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
+		usleep_range(delay_ms * USEC_PER_MSEC,
+			     (delay_ms + upper) * USEC_PER_MSEC);
+	}
 }
 
 bool pci_reset_supported(struct pci_dev *dev)
@@ -1663,6 +1665,7 @@ int pci_save_state(struct pci_dev *dev)
 			return i;
 
 	pci_save_ltr_state(dev);
+	pci_save_aspm_l1ss_state(dev);
 	pci_save_dpc_state(dev);
 	pci_save_aer_state(dev);
 	pci_save_ptm_state(dev);
@@ -1769,6 +1772,7 @@ void pci_restore_state(struct pci_dev *dev)
 	 * LTR itself (in the PCIe capability).
 	 */
 	pci_restore_ltr_state(dev);
+	pci_restore_aspm_l1ss_state(dev);
 
 	pci_restore_pcie_state(dev);
 	pci_restore_pasid_state(dev);
@@ -2706,24 +2710,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
-	/*
-	 * There are systems (for example, Intel mobile chips since Coffee
-	 * Lake) where the power drawn while suspended can be significantly
-	 * reduced by disabling PTM on PCIe root ports as this allows the
-	 * port to enter a lower-power PM state and the SoC to reach a
-	 * lower-power idle state as a whole.
-	 */
-	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
-		pci_disable_ptm(dev);
-
 	pci_enable_wake(dev, target_state, wakeup);
 
 	error = pci_set_power_state(dev, target_state);
-	if (error) {
+	if (error)
 		pci_enable_wake(dev, target_state, false);
-		pci_restore_ptm_state(dev);
-	}
 
 	return error;
 }
@@ -2764,24 +2756,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
-	/*
-	 * There are systems (for example, Intel mobile chips since Coffee
-	 * Lake) where the power drawn while suspended can be significantly
-	 * reduced by disabling PTM on PCIe root ports as this allows the
-	 * port to enter a lower-power PM state and the SoC to reach a
-	 * lower-power idle state as a whole.
-	 */
-	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
-		pci_disable_ptm(dev);
-
 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
 
 	error = pci_set_power_state(dev, target_state);
-	if (error) {
+	if (error)
 		pci_enable_wake(dev, target_state, false);
-		pci_restore_ptm_state(dev);
-	}
 
 	return error;
 }
@@ -3485,6 +3465,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 	if (error)
 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
 
+	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
+					    2 * sizeof(u32));
+	if (error)
+		pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
+
 	pci_allocate_vc_save_buffers(dev);
 }
 
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 785f31086313..46af1195c047 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
 #endif /* CONFIG_PCI_IOV */
 
 #ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
 void pci_save_ptm_state(struct pci_dev *dev);
 void pci_restore_ptm_state(struct pci_dev *dev);
-void pci_disable_ptm(struct pci_dev *dev);
+void pci_suspend_ptm(struct pci_dev *dev);
+void pci_resume_ptm(struct pci_dev *dev);
 #else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
 static inline void pci_save_ptm_state(struct pci_dev *dev) { }
 static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
-static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline void pci_suspend_ptm(struct pci_dev *dev) { }
+static inline void pci_resume_ptm(struct pci_dev *dev) { }
 #endif
 
 unsigned long pci_cardbus_resource_alignment(struct resource *);
@@ -561,10 +565,14 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
 void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
+static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
 #endif
 
 #ifdef CONFIG_PCIE_ECRC
@@ -575,12 +583,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 static inline void pcie_ecrc_get_policy(char *str) { }
 #endif
 
-#ifdef CONFIG_PCIE_PTM
-void pci_ptm_init(struct pci_dev *dev);
-#else
-static inline void pci_ptm_init(struct pci_dev *dev) { }
-#endif
-
 struct pci_dev_reset_methods {
 	u16 vendor;
 	u16 device;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a8aec190986c..53a1fa306e1e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
@@ -350,29 +351,43 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
 	return 0;
 }
 
+/*
+ * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
+ * register. Ports enter L1.2 when the most recent LTR value is greater
+ * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
+ * don't enter L1.2 too aggressively.
+ *
+ * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
+ */
 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
 {
-	u32 threshold_ns = threshold_us * 1000;
+	u64 threshold_ns = (u64) threshold_us * 1000;
 
-	/* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
-	if (threshold_ns < 32) {
-		*scale = 0;
+	/*
+	 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
+	 * value of 0x3ff.
+	 */
+	if (threshold_ns <= 0x3ff * 1) {
+		*scale = 0;		/* Value times 1ns */
 		*value = threshold_ns;
-	} else if (threshold_ns < 1024) {
-		*scale = 1;
-		*value = threshold_ns >> 5;
-	} else if (threshold_ns < 32768) {
-		*scale = 2;
-		*value = threshold_ns >> 10;
-	} else if (threshold_ns < 1048576) {
-		*scale = 3;
-		*value = threshold_ns >> 15;
-	} else if (threshold_ns < 33554432) {
-		*scale = 4;
-		*value = threshold_ns >> 20;
+	} else if (threshold_ns <= 0x3ff * 32) {
+		*scale = 1;		/* Value times 32ns */
+		*value = roundup(threshold_ns, 32) / 32;
+	} else if (threshold_ns <= 0x3ff * 1024) {
+		*scale = 2;		/* Value times 1024ns */
+		*value = roundup(threshold_ns, 1024) / 1024;
+	} else if (threshold_ns <= 0x3ff * 32768) {
+		*scale = 3;		/* Value times 32768ns */
+		*value = roundup(threshold_ns, 32768) / 32768;
+	} else if (threshold_ns <= 0x3ff * 1048576) {
+		*scale = 4;		/* Value times 1048576ns */
+		*value = roundup(threshold_ns, 1048576) / 1048576;
+	} else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+		*scale = 5;		/* Value times 33554432ns */
+		*value = roundup(threshold_ns, 33554432) / 33554432;
 	} else {
 		*scale = 5;
-		*value = threshold_ns >> 25;
+		*value = 0x3ff;		/* Max representable value */
 	}
 }
 
@@ -455,6 +470,31 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
 	pci_write_config_dword(pdev, pos, val);
 }
 
+static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
+{
+	u16 l1ss = dev->l1ss;
+	u32 l1_2_enable;
+
+	/*
+	 * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
+	 * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
+	 */
+	pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
+
+	/*
+	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
+	 * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+	 * enable bits, even though they're all in PCI_L1SS_CTL1.
+	 */
+	l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+	ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+	pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
+	if (l1_2_enable)
+		pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
+				       ctl1 | l1_2_enable);
+}
+
 /* Calculate L1.2 PM substate timing parameters */
 static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 				u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -464,7 +504,6 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
 	u32 ctl1 = 0, ctl2 = 0;
 	u32 pctl1, pctl2, cctl1, cctl2;
-	u32 pl1_2_enables, cl1_2_enables;
 
 	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
 		return;
@@ -513,39 +552,78 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 	    ctl2 == pctl2 && ctl2 == cctl2)
 		return;
 
-	/* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
-	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
-	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+	pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+		   PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+		   PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+	pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+			  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+			  PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+	aspm_program_l1ss(parent, pctl1, ctl2);
+
+	cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+		   PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+		   PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+	cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+			  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+			  PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+	aspm_program_l1ss(child, cctl1, ctl2);
+}
 
-	if (pl1_2_enables || cl1_2_enables) {
-		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
-					PCI_L1SS_CTL1_L1_2_MASK, 0);
-		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
-					PCI_L1SS_CTL1_L1_2_MASK, 0);
-	}
+static void aspm_l1ss_init(struct pcie_link_state *link)
+{
+	struct pci_dev *child = link->downstream, *parent = link->pdev;
+	u32 parent_l1ss_cap, child_l1ss_cap;
+	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
 
-	/* Program T_POWER_ON times in both ports */
-	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
-	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+	if (!parent->l1ss || !child->l1ss)
+		return;
 
-	/* Program Common_Mode_Restore_Time in upstream device */
-	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
-				PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+	/* Setup L1 substate */
+	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+			      &parent_l1ss_cap);
+	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+			      &child_l1ss_cap);
 
-	/* Program LTR_L1.2_THRESHOLD time in both ports */
-	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
-				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
-	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
-				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
-				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
-
-	if (pl1_2_enables || cl1_2_enables) {
-		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
-					pl1_2_enables);
-		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
-					cl1_2_enables);
-	}
+	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+		parent_l1ss_cap = 0;
+	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+		child_l1ss_cap = 0;
+
+	/*
+	 * If we don't have LTR for the entire path from the Root Complex
+	 * to this device, we can't use ASPM L1.2 because it relies on the
+	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+	 */
+	if (!child->ltr_path)
+		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+		link->aspm_support |= ASPM_STATE_L1_1;
+	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+		link->aspm_support |= ASPM_STATE_L1_2;
+	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+
+	if (parent_l1ss_cap)
+		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+				      &parent_l1ss_ctl1);
+	if (child_l1ss_cap)
+		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+				      &child_l1ss_ctl1);
+
+	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+		link->aspm_enabled |= ASPM_STATE_L1_1;
+	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+		link->aspm_enabled |= ASPM_STATE_L1_2;
+	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+
+	if (link->aspm_support & ASPM_STATE_L1SS)
+		aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
 }
 
 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
@@ -553,8 +631,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 	struct pci_dev *child = link->downstream, *parent = link->pdev;
 	u32 parent_lnkcap, child_lnkcap;
 	u16 parent_lnkctl, child_lnkctl;
-	u32 parent_l1ss_cap, child_l1ss_cap;
-	u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
 	struct pci_bus *linkbus = parent->subordinate;
 
 	if (blacklist) {
@@ -609,52 +685,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 	if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
 		link->aspm_enabled |= ASPM_STATE_L1;
 
-	/* Setup L1 substate */
-	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
-			      &parent_l1ss_cap);
-	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
-			      &child_l1ss_cap);
-
-	if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
-		parent_l1ss_cap = 0;
-	if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
-		child_l1ss_cap = 0;
-
-	/*
-	 * If we don't have LTR for the entire path from the Root Complex
-	 * to this device, we can't use ASPM L1.2 because it relies on the
-	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
-	 */
-	if (!child->ltr_path)
-		child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
-	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
-		link->aspm_support |= ASPM_STATE_L1_1;
-	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
-		link->aspm_support |= ASPM_STATE_L1_2;
-	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
-		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
-	if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
-		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
-
-	if (parent_l1ss_cap)
-		pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
-				      &parent_l1ss_ctl1);
-	if (child_l1ss_cap)
-		pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
-				      &child_l1ss_ctl1);
-
-	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
-		link->aspm_enabled |= ASPM_STATE_L1_1;
-	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
-		link->aspm_enabled |= ASPM_STATE_L1_2;
-	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
-		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
-	if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
-		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
-
-	if (link->aspm_support & ASPM_STATE_L1SS)
-		aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
+	aspm_l1ss_init(link);
 
 	/* Save default state */
 	link->aspm_default = link->aspm_enabled;
@@ -726,6 +757,43 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
 				PCI_L1SS_CTL1_L1SS_MASK, val);
 }
 
+void pci_save_aspm_l1ss_state(struct pci_dev *dev)
+{
+	struct pci_cap_saved_state *save_state;
+	u16 l1ss = dev->l1ss;
+	u32 *cap;
+
+	if (!l1ss)
+		return;
+
+	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+	if (!save_state)
+		return;
+
+	cap = (u32 *)&save_state->cap.data[0];
+	pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
+	pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
+{
+	struct pci_cap_saved_state *save_state;
+	u32 *cap, ctl1, ctl2;
+	u16 l1ss = dev->l1ss;
+
+	if (!l1ss)
+		return;
+
+	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+	if (!save_state)
+		return;
+
+	cap = (u32 *)&save_state->cap.data[0];
+	ctl2 = *cap++;
+	ctl1 = *cap;
+	aspm_program_l1ss(dev, ctl1, ctl2);
+}
+
 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
 {
 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 3e9afee02e8d..f5ffea17c7f8 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -335,11 +335,16 @@ void pci_dpc_init(struct pci_dev *pdev)
 		return;
 
 	pdev->dpc_rp_extensions = true;
-	pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
-	if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
-		pci_err(pdev, "RP PIO log size %u is invalid\n",
-			pdev->dpc_rp_log_size);
-		pdev->dpc_rp_log_size = 0;
+
+	/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+	if (!pdev->dpc_rp_log_size) {
+		pdev->dpc_rp_log_size =
+			(cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+		if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+			pci_err(pdev, "RP PIO log size %u is invalid\n",
+				pdev->dpc_rp_log_size);
+			pdev->dpc_rp_log_size = 0;
+		}
 	}
 }
 
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 368a254e3124..b4e5f553467c 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -9,30 +9,38 @@
 #include <linux/pci.h>
 #include "../pci.h"
 
-static void pci_ptm_info(struct pci_dev *dev)
+/*
+ * If the next upstream device supports PTM, return it; otherwise return
+ * NULL. PTM Messages are local, so both link partners must support it.
+ */
+static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
 {
-	char clock_desc[8];
+	struct pci_dev *ups = pci_upstream_bridge(dev);
 
-	switch (dev->ptm_granularity) {
-	case 0:
-		snprintf(clock_desc, sizeof(clock_desc), "unknown");
-		break;
-	case 255:
-		snprintf(clock_desc, sizeof(clock_desc), ">254ns");
-		break;
-	default:
-		snprintf(clock_desc, sizeof(clock_desc), "%uns",
-			 dev->ptm_granularity);
-		break;
-	}
-	pci_info(dev, "PTM enabled%s, %s granularity\n",
-		 dev->ptm_root ? " (root)" : "", clock_desc);
+	/*
+	 * Switch Downstream Ports are not permitted to have a PTM
+	 * capability; their PTM behavior is controlled by the Upstream
+	 * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
+	 * Switch Downstream Port, look up one more level.
+	 */
+	if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
+		ups = pci_upstream_bridge(ups);
+
+	if (ups && ups->ptm_cap)
+		return ups;
+
+	return NULL;
 }
 
-void pci_disable_ptm(struct pci_dev *dev)
+/*
+ * Find the PTM Capability (if present) and extract the information we need
+ * to use it.
+ */
+void pci_ptm_init(struct pci_dev *dev)
 {
-	int ptm;
-	u16 ctrl;
+	u16 ptm;
+	u32 cap;
+	struct pci_dev *ups;
 
 	if (!pci_is_pcie(dev))
 		return;
@@ -41,21 +49,47 @@ void pci_disable_ptm(struct pci_dev *dev)
 	if (!ptm)
 		return;
 
-	pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
-	ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
-	pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
+	dev->ptm_cap = ptm;
+	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
+
+	pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
+	dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+	/*
+	 * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
+	 * furthest upstream Time Source as the PTM Root. For Endpoints,
+	 * "the Effective Granularity is the maximum Local Clock Granularity
+	 * reported by the PTM Root and all intervening PTM Time Sources."
+	 */
+	ups = pci_upstream_ptm(dev);
+	if (ups) {
+		if (ups->ptm_granularity == 0)
+			dev->ptm_granularity = 0;
+		else if (ups->ptm_granularity > dev->ptm_granularity)
+			dev->ptm_granularity = ups->ptm_granularity;
+	} else if (cap & PCI_PTM_CAP_ROOT) {
+		dev->ptm_root = 1;
+	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+
+		/*
+		 * Per sec 7.9.15.3, this should be the Local Clock
+		 * Granularity of the associated Time Source. But it
+		 * doesn't say how to find that Time Source.
+		 */
+		dev->ptm_granularity = 0;
+	}
+
+	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
+		pci_enable_ptm(dev, NULL);
 }
 
 void pci_save_ptm_state(struct pci_dev *dev)
 {
-	int ptm;
+	u16 ptm = dev->ptm_cap;
 	struct pci_cap_saved_state *save_state;
-	u16 *cap;
+	u32 *cap;
 
-	if (!pci_is_pcie(dev))
-		return;
-
-	ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
 	if (!ptm)
 		return;
 
@@ -63,146 +97,152 @@ void pci_save_ptm_state(struct pci_dev *dev)
 	if (!save_state)
 		return;
 
-	cap = (u16 *)&save_state->cap.data[0];
-	pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+	cap = (u32 *)&save_state->cap.data[0];
+	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
 }
 
 void pci_restore_ptm_state(struct pci_dev *dev)
 {
+	u16 ptm = dev->ptm_cap;
 	struct pci_cap_saved_state *save_state;
-	int ptm;
-	u16 *cap;
+	u32 *cap;
 
-	if (!pci_is_pcie(dev))
+	if (!ptm)
 		return;
 
 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
-	ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
-	if (!save_state || !ptm)
+	if (!save_state)
 		return;
 
-	cap = (u16 *)&save_state->cap.data[0];
-	pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
+	cap = (u32 *)&save_state->cap.data[0];
+	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
 }
 
-void pci_ptm_init(struct pci_dev *dev)
+/* Enable PTM in the Control register if possible */
+static int __pci_enable_ptm(struct pci_dev *dev)
 {
-	int pos;
-	u32 cap, ctrl;
-	u8 local_clock;
+	u16 ptm = dev->ptm_cap;
 	struct pci_dev *ups;