From: Mark Langsdorf <mark.langsdorf@amd.com>
To: Pavel Machek <pavel@suse.cz>, <joachim.deguara@amd.com>
Cc: <gregkh@ucw.cz>, <tglx@linutronix.de>, <mingo@redhat.com>,
<hpa@zytor.com>, <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 01/01][retry 1] x86: L3 cache index disable for 2.6.26
Date: Tue, 12 Aug 2008 11:04:08 -0500 [thread overview]
Message-ID: <200808121104.09288.mark.langsdorf@amd.com> (raw)
In-Reply-To: <20080808220011.GA12156@ucw.cz>
On Friday 08 August 2008, Pavel Machek wrote:
> I think there's one-value-per-file rule in sysfs...
>
> I guess it is better to return -EOPNOTSUP (or something) instead of
> english text explaining that...
>
> No, really, what you created is impossible to parse -- /proc like
> nightmare.
Okay, this is a simpler version that includes most of Ingo's
clean-ups and style changes. It only displays the two
cache index values. Is this acceptable?
New versions of AMD processors have support to disable parts
of their L3 caches if too many MCEs are generated by the
L3 cache.
This patch provides a /sysfs interface under the cache
hierarchy to display which cache indices are disabled
(if any) and to allow monitoring applications to disable a
cache index.
This patch does not set an automatic policy to disable
the L3 cache. Policy decisions would need to be made
by a RAS handler. This patch merely makes it easier to
see what indices are currently disabled.
Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
diff -r f3f819497a68 arch/x86/kernel/cpu/intel_cacheinfo.c
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c Thu Aug 07 04:24:53 2008 -0500
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c Tue Aug 12 05:29:37 2008 -0500
@@ -130,6 +130,7 @@ struct _cpuid4_info {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
+ unsigned long can_disable;
cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
};
@@ -251,6 +252,14 @@ static void __cpuinit amd_cpuid4(int lea
(ebx->split.ways_of_associativity + 1) - 1;
}
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+{
+ if (index < 3)
+ return;
+ this_leaf->can_disable = 1;
+}
+
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
union _cpuid4_leaf_eax eax;
@@ -258,10 +267,13 @@ static int __cpuinit cpuid4_cache_lookup
union _cpuid4_leaf_ecx ecx;
unsigned edx;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
- else
- cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+ if (boot_cpu_data.x86 >= 0x10)
+ amd_check_l3_disable(index, this_leaf);
+ } else {
+ cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+ }
if (eax.split.type == CACHE_TYPE_NULL)
return -EIO; /* better error ? */
@@ -269,9 +281,9 @@ static int __cpuinit cpuid4_cache_lookup
this_leaf->ebx = ebx;
this_leaf->ecx = ecx;
this_leaf->size = (ecx.split.number_of_sets + 1) *
- (ebx.split.coherency_line_size + 1) *
- (ebx.split.physical_line_partition + 1) *
- (ebx.split.ways_of_associativity + 1);
+ (ebx.split.coherency_line_size + 1) *
+ (ebx.split.physical_line_partition + 1) *
+ (ebx.split.ways_of_associativity + 1);
return 0;
}
@@ -574,6 +586,9 @@ static DEFINE_PER_CPU(struct _index_kobj
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
+#define to_object(k) container_of(k, struct _index_kobject, kobj)
+#define to_attr(a) container_of(a, struct _cache_attr, attr)
+
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
(struct _cpuid4_info *this_leaf, char *buf) \
@@ -618,6 +633,83 @@ static inline ssize_t show_shared_cpu_li
static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
return show_shared_cpu_map_func(leaf, 1, buf);
+}
+
+#if defined(CONFIG_PCI) && defined(CONFIG_K8_NB)
+#include <linux/pci.h>
+#include <asm/k8.h>
+static struct pci_dev *get_k8_northbridge(int node)
+{
+ return k8_northbridges[node];
+}
+#else
+static inline int pci_write_config_dword(struct pci_dev *dev, int where,
+ u32 val)
+{
+ return 0;
+}
+
+static inline int pci_read_config_dword(struct pci_dev *dev, int where,
+ u32 *val)
+{
+ return 0;
+}
+
+static struct pci_dev *get_k8_northbridge(int node)
+{
+ return NULL;
+}
+#endif
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
+{
+ int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+ struct pci_dev *dev = get_k8_northbridge(node);
+ ssize_t ret = 0;
+ int i;
+
+ if (!this_leaf->can_disable)
+ return sprintf(buf, "-1");
+
+ for (i = 0; i < 2; i++) {
+ unsigned int reg = 0;
+
+ pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
+
+ ret += sprintf(buf, "%s %x\t", buf, reg);
+ }
+ ret += sprintf(buf,"%s\n", buf);
+
+ return ret;
+}
+
+static ssize_t
+store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
+ size_t count)
+{
+ int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+ struct pci_dev *dev = get_k8_northbridge(node);
+ ssize_t ret = 0;
+ unsigned int index, val;
+
+ if (!this_leaf->can_disable)
+ return 0;
+
+ if (strlen(buf) > 15)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%x %x\n", &index, &val);
+ if (ret != 2)
+ return -EINVAL;
+ if (index > 1)
+ return -EINVAL;
+
+ val |= 0xc0000000;
+ pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+ wbinvd();
+ pci_write_config_dword(dev, 0x1BC + index * 4, val);
+
+ return strlen(buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
@@ -657,6 +749,8 @@ define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
+static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
+
static struct attribute * default_attrs[] = {
&type.attr,
&level.attr,
@@ -667,11 +761,9 @@ static struct attribute * default_attrs[
&size.attr,
&shared_cpu_map.attr,
&shared_cpu_list.attr,
+ &cache_disable.attr,
NULL
};
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
@@ -689,7 +781,15 @@ static ssize_t store(struct kobject * ko
static ssize_t store(struct kobject * kobj, struct attribute * attr,
const char * buf, size_t count)
{
- return 0;
+ struct _cache_attr *fattr = to_attr(attr);
+ struct _index_kobject *this_leaf = to_object(kobj);
+ ssize_t ret;
+
+ ret = fattr->store ?
+ fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+ buf, count) :
+ 0;
+ return ret;
}
static struct sysfs_ops sysfs_ops = {
next prev parent reply other threads:[~2008-08-12 15:59 UTC|newest]
Thread overview: 31+ messages / expand[flat|nested] mbox.gz Atom feed top
2008-07-18 21:03 [PATCH 01/01] x86: L3 cache index disable for 2.6.26 Mark Langsdorf
2008-07-21 11:37 ` Ingo Molnar
2008-07-21 12:48 ` Ingo Molnar
2008-07-22 18:06 ` Mark Langsdorf
2008-07-28 14:22 ` Ingo Molnar
2008-07-28 14:49 ` Ingo Molnar
2008-07-28 14:54 ` Langsdorf, Mark
2008-08-08 22:00 ` Pavel Machek
2008-08-12 16:04 ` Mark Langsdorf [this message]
2008-08-12 21:56 ` [PATCH 01/01][retry 1] " Pavel Machek
2008-08-12 22:01 ` Langsdorf, Mark
2008-08-12 22:07 ` Pavel Machek
2008-08-12 22:53 ` Greg KH
2008-08-12 22:12 ` Greg KH
2008-08-13 20:02 ` [PATCH 01/01][retry 2] " Mark Langsdorf
2008-08-13 20:38 ` Pavel Machek
2008-08-13 23:45 ` Greg KH
2008-08-14 13:43 ` [PATCH 01/01][retry 3] " Mark Langsdorf
2008-08-14 13:44 ` Pavel Machek
2008-08-14 14:02 ` Langsdorf, Mark
2008-08-14 15:46 ` Pavel Machek
2008-08-14 16:41 ` Langsdorf, Mark
2008-08-14 14:04 ` Greg KH
2008-08-14 14:23 ` [PATCH 01/01][retry 4] " Mark Langsdorf
2008-08-14 16:48 ` [PATCH 01/01][retry 5] " Mark Langsdorf
2008-08-14 17:10 ` Greg KH
2008-08-14 18:32 ` Mark Langsdorf
2008-08-15 16:42 ` Ingo Molnar
2008-08-15 19:21 ` Langsdorf, Mark
2008-08-15 19:57 ` Ingo Molnar
2008-08-15 20:02 ` Langsdorf, Mark
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=200808121104.09288.mark.langsdorf@amd.com \
--to=mark.langsdorf@amd.com \
--cc=gregkh@ucw.cz \
--cc=hpa@zytor.com \
--cc=joachim.deguara@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pavel@suse.cz \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox