From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
Peter Xu <peterx@redhat.com>, Juan Quintela <quintela@redhat.com>
Subject: [Qemu-devel] [PULL v2 30/50] postcopy: Allow registering of fd handler
Date: Tue, 20 Mar 2018 05:17:46 +0200
Message-ID: <1521515720-612046-31-git-send-email-mst@redhat.com>
In-Reply-To: <1521515720-612046-1-git-send-email-mst@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Allow other userfaultfds to be registered with the fault thread, so
that faults on shared memory regions owned by external processes can
be dispatched to their handlers and answered.
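As an illustration (not part of this patch), a backend on the
destination that has received a userfaultfd from an external process
might hook it into the fault thread roughly as follows; the handler
and function names here are hypothetical:

  #include <stdint.h>
  #include <linux/userfaultfd.h>

  #include "migration/postcopy-ram.h"

  /*
   * Hypothetical handler: the fault thread calls this whenever the
   * registered fd polls readable, after it has already read the
   * struct uffd_msg from pcfd->fd; 'ufd' points at that message.
   */
  static int my_shared_fault_handler(struct PostCopyFD *pcfd, void *ufd)
  {
      struct uffd_msg *msg = ufd;
      uint64_t client_addr = msg->arg.pagefault.address;

      /*
       * Resolve the fault here, e.g. request the page from the
       * migration source, then UFFDIO_COPY + UFFDIO_WAKE it on
       * pcfd->fd once it arrives.
       */
      (void)client_addr;
      return 0; /* non-zero makes the fault thread report an error */
  }

  static void my_backend_enable_postcopy(int uffd)
  {
      struct PostCopyFD pcfd = {
          .fd      = uffd,                   /* fd from the external process */
          .data    = NULL,                   /* opaque pointer for the handler */
          .handler = my_shared_fault_handler,
          .idstr   = "my-backend",           /* used in error messages */
      };

      /*
       * postcopy_register_shared_ufd() copies the struct into
       * mis->postcopy_remote_fds, so a stack PostCopyFD is fine here.
       */
      postcopy_register_shared_ufd(&pcfd);
  }

Note that the fault thread sizes its pollfd array from
postcopy_remote_fds once, when it starts, so fds have to be
registered before the fault thread is created.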
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
migration/migration.h | 2 +
migration/postcopy-ram.h | 21 +++++
migration/migration.c | 6 ++
migration/postcopy-ram.c | 209 +++++++++++++++++++++++++++++++++++------------
migration/trace-events | 2 +
5 files changed, 187 insertions(+), 53 deletions(-)
diff --git a/migration/migration.h b/migration/migration.h
index 08c5d2d..d02a759 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -51,6 +51,8 @@ struct MigrationIncomingState {
QemuMutex rp_mutex; /* We send replies from multiple threads */
void *postcopy_tmp_page;
void *postcopy_tmp_zero_page;
+ /* PostCopyFDs for external userfaultfds & handlers of shared memory */
+ GArray *postcopy_remote_fds;
QEMUBH *bh;
diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h
index 0421c98..f21eef6 100644
--- a/migration/postcopy-ram.h
+++ b/migration/postcopy-ram.h
@@ -143,4 +143,25 @@ void postcopy_remove_notifier(NotifierWithReturn *n);
/* Call the notifier list set by postcopy_add_start_notifier */
int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp);
+struct PostCopyFD;
+
+/* ufd is a pointer to the struct uffd_msg. TODO: make this more portable! */
+typedef int (*pcfdhandler)(struct PostCopyFD *pcfd, void *ufd);
+
+struct PostCopyFD {
+ int fd;
+ /* Data to pass to handler */
+ void *data;
+ /* Handler to be called whenever we get a poll event */
+ pcfdhandler handler;
+ /* A string to use in error messages */
+ const char *idstr;
+};
+
+/* Register a userfaultfd owned by an external process for
+ * shared memory.
+ */
+void postcopy_register_shared_ufd(struct PostCopyFD *pcfd);
+void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd);
+
#endif
diff --git a/migration/migration.c b/migration/migration.c
index 6a4780e..1f22f46 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -155,6 +155,8 @@ MigrationIncomingState *migration_incoming_get_current(void)
if (!once) {
mis_current.state = MIGRATION_STATUS_NONE;
memset(&mis_current, 0, sizeof(MigrationIncomingState));
+ mis_current.postcopy_remote_fds = g_array_new(FALSE, TRUE,
+ sizeof(struct PostCopyFD));
qemu_mutex_init(&mis_current.rp_mutex);
qemu_event_init(&mis_current.main_thread_load_event, false);
once = true;
@@ -177,6 +179,10 @@ void migration_incoming_state_destroy(void)
qemu_fclose(mis->from_src_file);
mis->from_src_file = NULL;
}
+ if (mis->postcopy_remote_fds) {
+ g_array_free(mis->postcopy_remote_fds, TRUE);
+ mis->postcopy_remote_fds = NULL;
+ }
qemu_event_reset(&mis->main_thread_load_event);
}
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 1089814..6ce1577 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -533,29 +533,44 @@ static void *postcopy_ram_fault_thread(void *opaque)
MigrationIncomingState *mis = opaque;
struct uffd_msg msg;
int ret;
+ size_t index;
RAMBlock *rb = NULL;
RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */
trace_postcopy_ram_fault_thread_entry();
qemu_sem_post(&mis->fault_thread_sem);
+ struct pollfd *pfd;
+ size_t pfd_len = 2 + mis->postcopy_remote_fds->len;
+
+ pfd = g_new0(struct pollfd, pfd_len);
+
+ pfd[0].fd = mis->userfault_fd;
+ pfd[0].events = POLLIN;
+ pfd[1].fd = mis->userfault_event_fd;
+ pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
+ trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
+ for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
+ struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
+ struct PostCopyFD, index);
+ pfd[2 + index].fd = pcfd->fd;
+ pfd[2 + index].events = POLLIN;
+ trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
+ pcfd->fd);
+ }
+
while (true) {
ram_addr_t rb_offset;
- struct pollfd pfd[2];
+ int poll_result;
/*
* We're mainly waiting for the kernel to give us a faulting HVA,
* however we can be told to quit via userfault_quit_fd which is
* an eventfd
*/
- pfd[0].fd = mis->userfault_fd;
- pfd[0].events = POLLIN;
- pfd[0].revents = 0;
- pfd[1].fd = mis->userfault_event_fd;
- pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
- pfd[1].revents = 0;
-
- if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
+
+ poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
+ if (poll_result == -1) {
error_report("%s: userfault poll: %s", __func__, strerror(errno));
break;
}
@@ -575,57 +590,117 @@ static void *postcopy_ram_fault_thread(void *opaque)
}
}
- ret = read(mis->userfault_fd, &msg, sizeof(msg));
- if (ret != sizeof(msg)) {
- if (errno == EAGAIN) {
- /*
- * if a wake up happens on the other thread just after
- * the poll, there is nothing to read.
- */
- continue;
+ if (pfd[0].revents) {
+ poll_result--;
+ ret = read(mis->userfault_fd, &msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ if (errno == EAGAIN) {
+ /*
+ * if a wake up happens on the other thread just after
+ * the poll, there is nothing to read.
+ */
+ continue;
+ }
+ if (ret < 0) {
+ error_report("%s: Failed to read full userfault "
+ "message: %s",
+ __func__, strerror(errno));
+ break;
+ } else {
+ error_report("%s: Read %d bytes from userfaultfd "
+ "expected %zd",
+ __func__, ret, sizeof(msg));
+ break; /* Lost alignment, don't know what we'd read next */
+ }
}
- if (ret < 0) {
- error_report("%s: Failed to read full userfault message: %s",
- __func__, strerror(errno));
- break;
- } else {
- error_report("%s: Read %d bytes from userfaultfd expected %zd",
- __func__, ret, sizeof(msg));
- break; /* Lost alignment, don't know what we'd read next */
+ if (msg.event != UFFD_EVENT_PAGEFAULT) {
+ error_report("%s: Read unexpected event %ud from userfaultfd",
+ __func__, msg.event);
+ continue; /* It's not a page fault, shouldn't happen */
}
- }
- if (msg.event != UFFD_EVENT_PAGEFAULT) {
- error_report("%s: Read unexpected event %ud from userfaultfd",
- __func__, msg.event);
- continue; /* It's not a page fault, shouldn't happen */
- }
- rb = qemu_ram_block_from_host(
- (void *)(uintptr_t)msg.arg.pagefault.address,
- true, &rb_offset);
- if (!rb) {
- error_report("postcopy_ram_fault_thread: Fault outside guest: %"
- PRIx64, (uint64_t)msg.arg.pagefault.address);
- break;
- }
+ rb = qemu_ram_block_from_host(
+ (void *)(uintptr_t)msg.arg.pagefault.address,
+ true, &rb_offset);
+ if (!rb) {
+ error_report("postcopy_ram_fault_thread: Fault outside guest: %"
+ PRIx64, (uint64_t)msg.arg.pagefault.address);
+ break;
+ }
- rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
- trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
+ rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
+ trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
qemu_ram_get_idstr(rb),
rb_offset);
+ /*
+ * Send the request to the source - we want to request one
+ * of our host page sizes (which is >= TPS)
+ */
+ if (rb != last_rb) {
+ last_rb = rb;
+ migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
+ rb_offset, qemu_ram_pagesize(rb));
+ } else {
+ /* Save some space */
+ migrate_send_rp_req_pages(mis, NULL,
+ rb_offset, qemu_ram_pagesize(rb));
+ }
+ }
- /*
- * Send the request to the source - we want to request one
- * of our host page sizes (which is >= TPS)
- */
- if (rb != last_rb) {
- last_rb = rb;
- migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
- rb_offset, qemu_ram_pagesize(rb));
- } else {
- /* Save some space */
- migrate_send_rp_req_pages(mis, NULL,
- rb_offset, qemu_ram_pagesize(rb));
+ /* Now handle any requests from external processes on shared memory */
+ /* TODO: May need to handle devices deregistering during postcopy */
+ for (index = 2; index < pfd_len && poll_result; index++) {
+ if (pfd[index].revents) {
+ struct PostCopyFD *pcfd =
+ &g_array_index(mis->postcopy_remote_fds,
+ struct PostCopyFD, index - 2);
+
+ poll_result--;
+ if (pfd[index].revents & POLLERR) {
+ error_report("%s: POLLERR on poll %zd fd=%d",
+ __func__, index, pcfd->fd);
+ pfd[index].events = 0;
+ continue;
+ }
+
+ ret = read(pcfd->fd, &msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ if (errno == EAGAIN) {
+ /*
+ * if a wake up happens on the other thread just after
+ * the poll, there is nothing to read.
+ */
+ continue;
+ }
+ if (ret < 0) {
+ error_report("%s: Failed to read full userfault "
+ "message: %s (shared) revents=%d",
+ __func__, strerror(errno),
+ pfd[index].revents);
+ /* TODO: Could just disable this sharer */
+ break;
+ } else {
+ error_report("%s: Read %d bytes from userfaultfd "
+ "expected %zd (shared)",
+ __func__, ret, sizeof(msg));
+ /* TODO: Could just disable this sharer */
+ break; /* Lost alignment, don't know what we'd read next */
+ }
+ }
+ if (msg.event != UFFD_EVENT_PAGEFAULT) {
+ error_report("%s: Read unexpected event %ud "
+ "from userfaultfd (shared)",
+ __func__, msg.event);
+ continue; /* It's not a page fault, shouldn't happen */
+ }
+ /* Call the device handler registered with us */
+ ret = pcfd->handler(pcfd, &msg);
+ if (ret) {
+ error_report("%s: Failed to resolve shared fault on %zd/%s",
+ __func__, index, pcfd->idstr);
+ /* TODO: Fail? Disable this sharer? */
+ }
+ }
}
}
trace_postcopy_ram_fault_thread_exit();
@@ -970,3 +1045,31 @@ PostcopyState postcopy_state_set(PostcopyState new_state)
{
return atomic_xchg(&incoming_postcopy_state, new_state);
}
+
+/* Register a handler for external shared memory postcopy
+ * called on the destination.
+ */
+void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
+ *pcfd);
+}
+
+/* Unregister a handler for external shared memory postcopy
+ */
+void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
+{
+ guint i;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ GArray *pcrfds = mis->postcopy_remote_fds;
+
+ for (i = 0; i < pcrfds->len; i++) {
+ struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
+ if (cur->fd == pcfd->fd) {
+ mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
+ return;
+ }
+ }
+}
diff --git a/migration/trace-events b/migration/trace-events
index 93961de..1e617ad 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -190,6 +190,8 @@ postcopy_place_page_zero(void *host_addr) "host=%p"
postcopy_ram_enable_notify(void) ""
postcopy_ram_fault_thread_entry(void) ""
postcopy_ram_fault_thread_exit(void) ""
+postcopy_ram_fault_thread_fds_core(int baseufd, int quitfd) "ufd: %d quitfd: %d"
+postcopy_ram_fault_thread_fds_extra(size_t index, const char *name, int fd) "%zd/%s: %d"
postcopy_ram_fault_thread_quit(void) ""
postcopy_ram_fault_thread_request(uint64_t hostaddr, const char *ramblock, size_t offset) "Request for HVA=0x%" PRIx64 " rb=%s offset=0x%zx"
postcopy_ram_incoming_cleanup_closeuf(void) ""
--
MST