Linux CXL
 help / color / mirror / Atom feed
From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
To: <fan.ni@samsung.com>, <linux-cxl@vger.kernel.org>,
	<qemu-devel@nongnu.org>
Cc: "Alex Bennée" <alex.bennee@linaro.org>,
	"Alexandre Iooss" <erdnaxe@crans.org>,
	"Mahmoud Mandour" <ma.mandourr@gmail.com>,
	"Pierrick Bouvier" <pierrick.bouvier@linaro.org>,
	linuxarm@huawei.com, "Niyas Sait" <niyas.sait@huawei.com>
Subject: [RFC PATCH QEMU 2/3] plugins: Add cache miss reporting over a socket.
Date: Fri, 24 Jan 2025 17:29:04 +0000	[thread overview]
Message-ID: <20250124172905.84099-3-Jonathan.Cameron@huawei.com> (raw)
In-Reply-To: <20250124172905.84099-1-Jonathan.Cameron@huawei.com>

This allows an external program to act as a hotness tracker.

Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
 contrib/plugins/cache.c | 75 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 68 insertions(+), 7 deletions(-)

diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c
index 7baff86860..5af1e6559c 100644
--- a/contrib/plugins/cache.c
+++ b/contrib/plugins/cache.c
@@ -7,10 +7,17 @@
 
 #include <inttypes.h>
 #include <stdio.h>
+#include <unistd.h>
 #include <glib.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
 
 #include <qemu-plugin.h>
 
+static int client_socket = -1;
+static uint64_t missfilterbase;
+static uint64_t missfiltersize;
+
 #define STRTOLL(x) g_ascii_strtoll(x, NULL, 10)
 
 QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
@@ -104,6 +111,7 @@ static Cache **l2_ucaches;
 static GMutex *l1_dcache_locks;
 static GMutex *l1_icache_locks;
 static GMutex *l2_ucache_locks;
+static GMutex *socket_lock;
 
 static uint64_t l1_dmem_accesses;
 static uint64_t l1_imem_accesses;
@@ -385,6 +393,21 @@ static bool access_cache(Cache *cache, uint64_t addr)
     return false;
 }
 
+static void miss(uint64_t paddr)
+{
+    if (client_socket < 0) {
+        return;
+    }
+
+    if (paddr < missfilterbase || paddr >= missfilterbase + missfiltersize) {
+        return;
+    }
+
+    g_mutex_lock(socket_lock);
+    send(client_socket, &paddr, sizeof(paddr), 0);
+    g_mutex_unlock(socket_lock);
+}
+
 static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                             uint64_t vaddr, void *userdata)
 {
@@ -395,9 +418,6 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
     bool hit_in_l1;
 
     hwaddr = qemu_plugin_get_hwaddr(info, vaddr);
-    if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) {
-        return;
-    }
 
     effective_addr = hwaddr ? qemu_plugin_hwaddr_phys_addr(hwaddr) : vaddr;
     cache_idx = vcpu_index % cores;
@@ -412,7 +432,11 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
     l1_dcaches[cache_idx]->accesses++;
     g_mutex_unlock(&l1_dcache_locks[cache_idx]);
 
-    if (hit_in_l1 || !use_l2) {
+    if (hit_in_l1) {
+        return;
+    }
+    if (!use_l2) {
+        miss(effective_addr);
         /* No need to access L2 */
         return;
     }
@@ -422,6 +446,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
         insn = userdata;
         __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
         l2_ucaches[cache_idx]->misses++;
+        miss(effective_addr);
     }
     l2_ucaches[cache_idx]->accesses++;
     g_mutex_unlock(&l2_ucache_locks[cache_idx]);
@@ -447,8 +472,12 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
     l1_icaches[cache_idx]->accesses++;
     g_mutex_unlock(&l1_icache_locks[cache_idx]);
 
-    if (hit_in_l1 || !use_l2) {
-        /* No need to access L2 */
+    if (hit_in_l1) {
+        return;
+    }
+
+    if (!use_l2) {
+        miss(insn_addr);
         return;
     }
 
@@ -739,14 +768,16 @@ QEMU_PLUGIN_EXPORT
 int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                         int argc, char **argv)
 {
-    int i;
+    int i, port;
     int l1_iassoc, l1_iblksize, l1_icachesize;
     int l1_dassoc, l1_dblksize, l1_dcachesize;
     int l2_assoc, l2_blksize, l2_cachesize;
+    struct sockaddr_in server_addr;
 
     limit = 32;
     sys = info->system_emulation;
 
+    port = -1;
     l1_dassoc = 8;
     l1_dblksize = 64;
     l1_dcachesize = l1_dblksize * l1_dassoc * 32;
@@ -808,11 +839,39 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                 fprintf(stderr, "invalid eviction policy: %s\n", opt);
                 return -1;
             }
+        } else if (g_strcmp0(tokens[0], "port") == 0) {
+            port = STRTOLL(tokens[1]);
+        } else if (g_strcmp0(tokens[0], "missfilterbase") == 0) {
+            missfilterbase = STRTOLL(tokens[1]);
+        } else if (g_strcmp0(tokens[0], "missfiltersize") == 0) {
+            missfiltersize = STRTOLL(tokens[1]);
         } else {
             fprintf(stderr, "option parsing failed: %s\n", opt);
             return -1;
         }
     }
+    if (port >= 0) {
+        uint64_t paddr = 42; /* hello, I'm a provider */
+        client_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+        if (client_socket < 0) {
+            printf("failed to create a socket\n");
+            return -1;
+        }
+        printf("Cache miss reporting on base 0x%" PRIx64 " size 0x%" PRIx64 "\n",
+               missfilterbase, missfiltersize);
+        memset((char *)&server_addr, 0, sizeof(server_addr));
+        server_addr.sin_family = AF_INET;
+        server_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+        server_addr.sin_port = htons(port);
+
+        if (connect(client_socket, (struct sockaddr *)&server_addr,
+                    sizeof(server_addr)) < 0) {
+            close(client_socket);
+            return -1;
+        }
+        /* Let it know we are a data provider */
+        send(client_socket, &paddr, sizeof(paddr), 0);
+    }
 
     policy_init();
 
@@ -840,6 +899,8 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
         return -1;
     }
 
+    socket_lock = g_new0(GMutex, 1);
+
     l1_dcache_locks = g_new0(GMutex, cores);
     l1_icache_locks = g_new0(GMutex, cores);
     l2_ucache_locks = use_l2 ? g_new0(GMutex, cores) : NULL;
-- 
2.43.0


  parent reply	other threads:[~2025-01-24 17:30 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-01-24 17:29 [RFC PATCH QEMU 0/3] cxl/plugins: Hotness Monitoring Unit with 'real' data Jonathan Cameron
2025-01-24 17:29 ` [RFC PATCH QEMU 1/3] hw/cxl: Initial CXL Hotness Monitoring Unit Emulation Jonathan Cameron
2025-01-24 17:29 ` Jonathan Cameron [this message]
2025-05-20 14:16   ` [RFC PATCH QEMU 2/3] plugins: Add cache miss reporting over a socket Alex Bennée
2025-01-24 17:29 ` [RFC PATCH QEMU 3/3] contrib: Add example hotness monitoring unit server Jonathan Cameron
2025-01-24 20:55 ` [RFC PATCH QEMU 0/3] cxl/plugins: Hotness Monitoring Unit with 'real' data Pierrick Bouvier
2025-01-27 10:20   ` Jonathan Cameron
2025-01-28 20:04     ` Pierrick Bouvier
2025-01-29 10:29       ` Jonathan Cameron
2025-01-29 22:31         ` Pierrick Bouvier
2025-01-30 15:52           ` Jonathan Cameron
2025-01-30 18:28             ` Pierrick Bouvier
2025-01-31 11:15               ` Jonathan Cameron

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250124172905.84099-3-Jonathan.Cameron@huawei.com \
    --to=jonathan.cameron@huawei.com \
    --cc=alex.bennee@linaro.org \
    --cc=erdnaxe@crans.org \
    --cc=fan.ni@samsung.com \
    --cc=linux-cxl@vger.kernel.org \
    --cc=linuxarm@huawei.com \
    --cc=ma.mandourr@gmail.com \
    --cc=niyas.sait@huawei.com \
    --cc=pierrick.bouvier@linaro.org \
    --cc=qemu-devel@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox