* [Qemu-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
[not found] <1468590577-30914-1-git-send-email-fziglio@redhat.com>
@ 2016-07-15 13:49 ` Frediano Ziglio
2016-07-15 13:56 ` Frediano Ziglio
From: Frediano Ziglio @ 2016-07-15 13:49 UTC (permalink / raw)
To: spice-devel, Qemu-devel; +Cc: Frediano Ziglio
---
ui/spice-core.c | 5 -----
ui/spice-display.c | 29 ++++++++---------------------
2 files changed, 8 insertions(+), 26 deletions(-)
diff --git a/ui/spice-core.c b/ui/spice-core.c
index da05054..f7647f7 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -828,11 +828,6 @@ void qemu_spice_init(void)
#ifdef HAVE_SPICE_GL
if (qemu_opt_get_bool(opts, "gl", 0)) {
- if ((port != 0) || (tls_port != 0)) {
- error_report("SPICE GL support is local-only for now and "
- "incompatible with -spice port/tls-port");
- exit(1);
- }
if (egl_rendernode_init() != 0) {
error_report("Failed to initialize EGL render node for SPICE GL");
exit(1);
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 2a77a54..72137bd 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -852,6 +852,10 @@ static void qemu_spice_gl_block_timer(void *opaque)
static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener *dcl,
QEMUGLParams *params)
{
+ SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
+
+ spice_qxl_gl_init(&ssd->qxl, qemu_egl_display, qemu_egl_rn_ctx);
+
eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
qemu_egl_rn_ctx);
return qemu_egl_create_context(dcl, params);
@@ -864,28 +868,11 @@ static void qemu_spice_gl_scanout(DisplayChangeListener *dcl,
uint32_t w, uint32_t h)
{
SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl);
- EGLint stride = 0, fourcc = 0;
- int fd = -1;
-
- if (tex_id) {
- fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc);
- if (fd < 0) {
- fprintf(stderr, "%s: failed to get fd for texture\n", __func__);
- return;
- }
- dprint(1, "%s: %dx%d (stride %d, fourcc 0x%x)\n", __func__,
- w, h, stride, fourcc);
- } else {
- dprint(1, "%s: no texture (no framebuffer)\n", __func__);
- }
-
- assert(!tex_id || fd >= 0);
- /* note: spice server will close the fd */
- spice_qxl_gl_scanout(&ssd->qxl, fd,
- surface_width(ssd->ds),
- surface_height(ssd->ds),
- stride, fourcc, y_0_top);
+ spice_qxl_gl_scanout_texture(&ssd->qxl, tex_id,
+ surface_width(ssd->ds),
+ surface_height(ssd->ds),
+ y_0_top);
qemu_spice_gl_monitor_config(ssd, x, y, w, h);
}
--
2.7.4
* Re: [Qemu-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-15 13:49 ` [Qemu-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing Frediano Ziglio
@ 2016-07-15 13:56 ` Frediano Ziglio
2016-07-18 16:41 ` [Qemu-devel] [Spice-devel] " Marc-André Lureau
From: Frediano Ziglio @ 2016-07-15 13:56 UTC (permalink / raw)
To: spice-devel, Qemu-devel; +Cc: Gerd Hoffmann
Forgot to add RFC to the subject
Frediano
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-15 13:56 ` Frediano Ziglio
@ 2016-07-18 16:41 ` Marc-André Lureau
2016-07-19 9:45 ` Frediano Ziglio
From: Marc-André Lureau @ 2016-07-18 16:41 UTC (permalink / raw)
To: Frediano Ziglio; +Cc: spice-devel, Qemu-devel, Gerd Hoffmann
Hi
----- Original Message -----
> Forgot to add RFC to the subject
>
What's the rationale? If you share the texture id, you must share the GL context too, right? Why not use a lower-level dmabuf fd that can be imported by the server GL context (which is also what the protocol requires anyway)?
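For reference, the lower-level path would look roughly like the sketch below. This is not code from either project: it assumes the EGL_MESA_image_dma_buf_export and EGL_EXT_image_dma_buf_import extensions with their entry points already resolved, a single-plane image with no modifiers, and no error handling; the export half is roughly what QEMU's egl_get_fd_for_texture wraps.

#include <stdint.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

/* Producer side: export the scanout texture as a dmabuf fd. */
static int export_texture_as_dmabuf(EGLDisplay dpy, EGLContext ctx, GLuint tex,
                                    EGLint *stride, int *fourcc)
{
    int fd = -1;
    EGLImageKHR image = eglCreateImageKHR(dpy, ctx, EGL_GL_TEXTURE_2D_KHR,
                                          (EGLClientBuffer)(uintptr_t)tex, NULL);

    eglExportDMABUFImageQueryMESA(dpy, image, fourcc, NULL, NULL);
    eglExportDMABUFImageMESA(dpy, image, &fd, stride, NULL);
    eglDestroyImageKHR(dpy, image);      /* the fd keeps the buffer alive */
    return fd;
}

/* Consumer side: import the fd as a texture in a separate, non-shared context. */
static GLuint import_dmabuf_as_texture(EGLDisplay dpy, int fd, int w, int h,
                                       EGLint stride, int fourcc)
{
    const EGLint attrs[] = {
        EGL_WIDTH, w, EGL_HEIGHT, h,
        EGL_LINUX_DRM_FOURCC_EXT, fourcc,
        EGL_DMA_BUF_PLANE0_FD_EXT, fd,
        EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
        EGL_DMA_BUF_PLANE0_PITCH_EXT, stride,
        EGL_NONE
    };
    EGLImageKHR image = eglCreateImageKHR(dpy, EGL_NO_CONTEXT,
                                          EGL_LINUX_DMA_BUF_EXT, NULL, attrs);
    GLuint tex = 0;

    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)image);
    return tex;
}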
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-18 16:41 ` [Qemu-devel] [Spice-devel] " Marc-André Lureau
@ 2016-07-19 9:45 ` Frediano Ziglio
2016-07-19 12:56 ` Marc-André Lureau
From: Frediano Ziglio @ 2016-07-19 9:45 UTC (permalink / raw)
To: Marc-André Lureau; +Cc: spice-devel, Qemu-devel, Gerd Hoffmann
>
> Hi
>
> ----- Original Message -----
> > Forgot to add RFC to the subject
> >
>
> What's the rationale? if you share the texture id, you must share the GL
> context too, right? Why not use a lower level dmabuf fd that can be imported
> by the server gl context (which is also what the protocol require anyway)?
>
Yes, the display and context are shared using spice_qxl_gl_init.
Importing again into a GL context would mean that you have to export the
DRM prime and import it again in a separate (not shared) context.
That is also doable; it just adds two system calls plus the wrapping/unwrapping.
It would be good to pass the EGLDisplay then, so spice-server doesn't have to
initialize EGL again, possibly using another physical card.
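To make the texture-passing variant concrete, the shape implied by the call sites in the patch is something like the prototypes below (hypothetical declarations; the matching spice-server RFC patches may spell them differently):

void spice_qxl_gl_init(QXLInstance *instance,
                       EGLDisplay display, EGLContext context);

void spice_qxl_gl_scanout_texture(QXLInstance *instance, uint32_t texture_id,
                                  uint32_t width, uint32_t height,
                                  int y_0_top);

QEMU hands over the display/context once, from qemu_spice_gl_create_context, and afterwards passes only the texture id per scanout.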
We have 4 cases:
- client not connected;
- local client;
- remote client, software encoding;
- remote client, hardware encoding.
Client not connected
Passing the texture is a no-op; passing a DRM prime requires
extracting the handle and closing it every frame.

Local client
In this case there is no overhead; the DRM prime is always extracted and
passed to the client.

Remote client, software encoding
Due to different problems (the DRM prime not being mmap-able, or the data not
being portably extractable) we'll need to import the DRM prime into a different
EGL context (not shared with the original one), create another texture,
extract the data and free the texture/DRM prime.

Remote client, hardware encoding
It's not clear whether it's better to pass the DRM prime or the texture;
some APIs take the texture. I got confirmation that gst_dmabuf_allocator_new
could try to use mmap in some cases, so we should check this somehow
to make sure it does not.
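For reference, wrapping a DRM prime fd for GStreamer is itself cheap; whether anything ends up mmap()ing it depends on the downstream elements, which is exactly the concern here. A minimal sketch, assuming gst-plugins-base's dmabuf allocator, an already initialized GStreamer, and a caller that knows the buffer size:

#include <gst/gst.h>
#include <gst/allocators/gstdmabuf.h>

/* Wrap a DRM prime fd as a GstBuffer; the GstMemory takes ownership of
 * the fd and closes it when the buffer is released. */
static GstBuffer *wrap_prime_fd(int fd, gsize size)
{
    GstAllocator *alloc = gst_dmabuf_allocator_new();
    GstMemory *mem = gst_dmabuf_allocator_alloc(alloc, fd, size);
    GstBuffer *buf = gst_buffer_new();

    gst_buffer_append_memory(buf, mem);   /* buffer now owns the memory */
    gst_object_unref(alloc);
    return buf;
}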
Taking into account that DRM primes come with "free" reference counting,
creating the DRM prime from the texture basically increments a counter, which
our implementation uses to make sure the texture still exists, so
passing the texture instead of the DRM prime possibly just saves a system call
in the normal case. I don't know what happens to the DRM object handle when
the texture is destroyed (in Qemu) with glDeleteTextures: whether bindings keep
the texture "alive" or everything is reset.
It could be that keeping qemu_spice_gl_scanout and spice_qxl_gl_scanout_texture
as in the current implementation, and adding a spice_qxl_gl_init/spice_qxl_gl_setup
that passes just the QXLInstance and the EGLDisplay, is a better solution.
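Sketched as a declaration, just to make that alternative concrete (hypothetical name and signature, matching the idea of passing only the QXLInstance and the EGLDisplay):

void spice_qxl_gl_setup(QXLInstance *instance, EGLDisplay display);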
Does this sound reasonable?
Frediano
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-19 9:45 ` Frediano Ziglio
@ 2016-07-19 12:56 ` Marc-André Lureau
2016-07-19 13:41 ` Frediano Ziglio
From: Marc-André Lureau @ 2016-07-19 12:56 UTC (permalink / raw)
To: Frediano Ziglio; +Cc: spice-devel, Qemu-devel, Gerd Hoffmann
Hi
----- Original Message -----
> >
> > Hi
> >
> > ----- Original Message -----
> > > Forgot to add RFC to the subject
> > >
> >
> > What's the rationale? if you share the texture id, you must share the GL
> > context too, right? Why not use a lower level dmabuf fd that can be
> > imported
> > by the server gl context (which is also what the protocol require anyway)?
> >
>
> Yes, the display and context are shared using spice_qxl_gl_init.
> Importing again into a gl context would mean that you have to export the
> DRM prime and import again in a separate (not shared) context.
> It's also doable, just add 2 system call and wrapping/unwrapping.
> Would be good to pass the EGLDisplay then so spice-server don't have to
> initialize again possibly using another physical card.
>
> We have 4 cases:
> - client not connected;
> - local client;
> - remote client, software encoding;
> - remote client, hardware encoding.
>
Before optimizing those syscalls and changing the API, etc., I would like to know whether they are expensive (my feeling is that they are not).
Also, it is possible that virglrenderer could be optimized to avoid exporting the prime fd for each scanout, if the backing image is always the same.
Sharing a GL context brings new issues. If spice-server could use its own context, we get some context isolation (GL is still bad at multithreading, IIRC).
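An isolated context on the same display is cheap to set up; a minimal sketch of what the server side could do (GLES2, pbuffer-capable config, error checking omitted):

#include <EGL/egl.h>

/* Create a context on the shared EGLDisplay that shares nothing with
 * QEMU's render-node context. */
static EGLContext create_isolated_context(EGLDisplay dpy)
{
    static const EGLint cfg_attrs[] = {
        EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
        EGL_NONE
    };
    static const EGLint ctx_attrs[] = {
        EGL_CONTEXT_CLIENT_VERSION, 2,
        EGL_NONE
    };
    EGLConfig cfg;
    EGLint n = 0;

    eglBindAPI(EGL_OPENGL_ES_API);
    eglChooseConfig(dpy, cfg_attrs, &cfg, 1, &n);
    /* EGL_NO_CONTEXT as share_context: no object sharing with QEMU */
    return eglCreateContext(dpy, cfg, EGL_NO_CONTEXT, ctx_attrs);
}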
> Client not connected
> Passing the texture is a no-operation, passing DRM prime require to
> extract the handle and close every frame.
>
> Local client
> In this case there is no overhear, DRM prime is always extracted and
> passed to the client
>
> Remote client, software encoding
> Due to different problems (DRM prime not mmap-able or data not portably
> extractable) we'll need to import the DRM prime into a different EGL
> context (not shared with the original one), create another texture,
> extract data and free all texture/DRM prime.
I don't think we have strong reasons to support software encoding; video encoding is really expensive, and that mmap/copy is not going to be marginal, so those two syscalls matter even less.
>
> Remote client, hardware encoding
> It's not clear if it's better to pass the DRM prime or the texture,
> some API pass the texture. I got confirmation that gst_dmabuf_allocator_new
> could try to use mmap in some cases so we should check this somehow
> to make sure it does not.
>
We definitely don't want any mmap/copy to take place for hw encoding.
> Taking into account that DRM prime came with "free" reference counting
> creating the DRM prime from texture basically increase a counter which is
> used by our implementation to make sure texture is still existing so
> possibly passing texture instead of DRM prime just save a system call
> in the normal case. I don't know what happens to the DRM object handle when
> the texture is destroyed (in Qemu) with glDeleteTextures if bindings keep
> texture "alive" or are all reset.
>
>
> Could be that keeping qemu_spice_gl_scanout and spice_qxl_gl_scanout_texture
> as current implementation and adding a spice_qxl_gl_init/spice_qxl_gl_setup
> passing just QXLInstance and EGLDisplay is a better solution.
>
> Does is sound reasonable?
I wouldn't rush with API changes before we have a better idea of how hw encoding can be done without mmap and whether it's really worth it (I would rather see spice spawning a separate GL context and process for the encoding than sharing it).
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-19 12:56 ` Marc-André Lureau
@ 2016-07-19 13:41 ` Frediano Ziglio
2016-07-20 15:13 ` Christophe Fergeau
From: Frediano Ziglio @ 2016-07-19 13:41 UTC (permalink / raw)
To: Marc-André Lureau; +Cc: spice-devel, Qemu-devel, Gerd Hoffmann
> Hi
>
> ----- Original Message -----
> > >
> > > Hi
> > >
> > > ----- Original Message -----
> > > > Forgot to add RFC to the subject
> > > >
> > >
> > > What's the rationale? if you share the texture id, you must share the GL
> > > context too, right? Why not use a lower level dmabuf fd that can be
> > > imported
> > > by the server gl context (which is also what the protocol require
> > > anyway)?
> > >
> >
> > Yes, the display and context are shared using spice_qxl_gl_init.
> > Importing again into a gl context would mean that you have to export the
> > DRM prime and import again in a separate (not shared) context.
> > It's also doable, just add 2 system call and wrapping/unwrapping.
> > Would be good to pass the EGLDisplay then so spice-server don't have to
> > initialize again possibly using another physical card.
> >
> > We have 4 cases:
> > - client not connected;
> > - local client;
> > - remote client, software encoding;
> > - remote client, hardware encoding.
> >
>
> Before optimizing those syscalls and changing API etc, I would like to know
> if they are expensive (it's not my feeling)
>
> Also, it is possible virglrenderer could be optimized to avoid exporting the
> prime fd for each scanout, if the backing image is always the same.
>
> Sharing a GL context brings new issues. If spice server could use its own
> context, we have some context isolation (gl is still bad at MT iirc).
>
> > Client not connected
> > Passing the texture is a no-operation, passing DRM prime require to
> > extract the handle and close every frame.
> >
> > Local client
> > In this case there is no overhear, DRM prime is always extracted and
> > passed to the client
> >
> > Remote client, software encoding
> > Due to different problems (DRM prime not mmap-able or data not portably
> > extractable) we'll need to import the DRM prime into a different EGL
> > context (not shared with the original one), create another texture,
> > extract data and free all texture/DRM prime.
>
> I don't think we have strong reasons to support software encoding, video
> encoding is really expensive, and that mmap/copy is not going to be
> marginal, so even less these 2 syscalls.
>
Using HW encoding is not as easy as it seems:
- you have to have a client supporting the server's HW encoders;
- you have to install additional software, often closed source, accepting
patents;
- you have to have the right permissions on the system.
What do you do if these conditions are not met? Refuse
connections? Show a blank screen?
With a good (local) connection I can easily play using software MJPEG, so why
should we avoid such configurations?
> >
> > Remote client, hardware encoding
> > It's not clear if it's better to pass the DRM prime or the texture,
> > some API pass the texture. I got confirmation that gst_dmabuf_allocator_new
> > could try to use mmap in some cases so we should check this somehow
> > to make sure it does not.
> >
>
> We definitely don't want any mmap/copy to take place for hw encoding.
>
Sure, but it's hard to avoid fallbacks with all the different setups.
> > Taking into account that DRM prime came with "free" reference counting
> > creating the DRM prime from texture basically increase a counter which is
> > used by our implementation to make sure texture is still existing so
> > possibly passing texture instead of DRM prime just save a system call
> > in the normal case. I don't know what happens to the DRM object handle when
> > the texture is destroyed (in Qemu) with glDeleteTextures if bindings keep
> > texture "alive" or are all reset.
> >
> >
> > Could be that keeping qemu_spice_gl_scanout and
> > spice_qxl_gl_scanout_texture
> > as current implementation and adding a spice_qxl_gl_init/spice_qxl_gl_setup
> > passing just QXLInstance and EGLDisplay is a better solution.
> >
> > Does is sound reasonable?
>
> I wouldn't rush with API changes before we have a better idea how hw encoding
> can be done without mmap and wether its really worth it (I would rather see
> spice spawning a seperate gl context and process for the encoding than
> sharing it)
>
I'm not rushing; that was the idea of the RFC.
Spawning a process only helps with the library licensing problem.
My list of patches for spice-server used the passed context just to create
a new context shared with the provided one; as I said, using a different
GL context and importing the DRM prime is a good option.
Passing the EGLDisplay from Qemu helps to solve:
- double EGL initialization;
- multiple-card issues;
- the -chroot/-runas Qemu options, where you lose access and are not
able to initialize EGL/VAAPI again.
I can see that the way Qemu searches for the card is different from VAAPI.
With multiple cards and Qemu run as a daemon (no Xwayland/X)
you can end up using two physical cards.
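One possible way to guarantee the same card is to derive the DRM node (and from it the VADisplay) from the EGLDisplay that QEMU already has. A sketch, assuming EGL_EXT_device_query and EGL_EXT_device_drm, libva's DRM backend, resolved extension entry points, and no error handling:

#include <fcntl.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <va/va_drm.h>

/* Map QEMU's EGLDisplay back to its DRM device node and open a VADisplay
 * on the very same device. */
static VADisplay va_display_from_egl(EGLDisplay egl_dpy)
{
    EGLAttrib device = 0;
    const char *node;
    int fd, major, minor;
    VADisplay va_dpy;

    eglQueryDisplayAttribEXT(egl_dpy, EGL_DEVICE_EXT, &device);
    node = eglQueryDeviceStringEXT((EGLDeviceEXT)device,
                                   EGL_DRM_DEVICE_FILE_EXT);
    fd = open(node, O_RDWR | O_CLOEXEC);

    va_dpy = vaGetDisplayDRM(fd);
    vaInitialize(va_dpy, &major, &minor);
    return va_dpy;
}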
I'll try VAAPI DRM prime passing, I hope this week.
Frediano
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-19 13:41 ` Frediano Ziglio
@ 2016-07-20 15:13 ` Christophe Fergeau
2016-07-21 11:43 ` Frediano Ziglio
From: Christophe Fergeau @ 2016-07-20 15:13 UTC (permalink / raw)
To: Frediano Ziglio
Cc: Marc-André Lureau, spice-devel, Qemu-devel, Gerd Hoffmann
On Tue, Jul 19, 2016 at 09:41:22AM -0400, Frediano Ziglio wrote:
> > I don't think we have strong reasons to support software encoding, video
> > encoding is really expensive, and that mmap/copy is not going to be
> > marginal, so even less these 2 syscalls.
> >
>
> Using HW encoding is not easy at it seems:
> - you have to have client supporting server HW encoders;
> - you have to install additional software often closed source, accepting
> patents;
> - you have to have right permission on the system.
> What are you doing if these option are not respected? Do not allow
> connections? Showing blank screen?
> With a good (local) connection I can easily play using software MJPEG, why
> we should avoid such configurations?
What we should be aiming at and optimizing for is the hardware-accelerated
case. We will need a fallback when this is not usable, but the various
copies/encoding steps are going to be very expensive by themselves. Are
these changes (passing a texture rather than a dmabuf) making a significant
difference with software encoding?
Christophe
* Re: [Qemu-devel] [Spice-devel] [PATCH Qemu] Change spice-server protocol for GL texture passing
2016-07-20 15:13 ` Christophe Fergeau
@ 2016-07-21 11:43 ` Frediano Ziglio
From: Frediano Ziglio @ 2016-07-21 11:43 UTC (permalink / raw)
To: Christophe Fergeau
Cc: spice-devel, Marc-André Lureau, Qemu-devel, Gerd Hoffmann
>
> On Tue, Jul 19, 2016 at 09:41:22AM -0400, Frediano Ziglio wrote:
> > > I don't think we have strong reasons to support software encoding, video
> > > encoding is really expensive, and that mmap/copy is not going to be
> > > marginal, so even less these 2 syscalls.
> > >
> >
> > Using HW encoding is not easy at it seems:
> > - you have to have client supporting server HW encoders;
> > - you have to install additional software often closed source, accepting
> > patents;
> > - you have to have right permission on the system.
> > What are you doing if these option are not respected? Do not allow
> > connections? Showing blank screen?
> > With a good (local) connection I can easily play using software MJPEG, why
> > we should avoid such configurations?
>
> What we should be aiming/optimizing for is the hardware-accelerated
> case. We will need a fallback when this is not usable, but the various
> copies/encoding/... are going to be very expensive by themselves. Are
> these changes (passing texture rather than dmabuf) making a significant
> difference with software encoding?
>
> Christophe
>
I got some experimental results passing DRM primes to GStreamer
(https://www.youtube.com/watch?v=NFDvMHfXUHA).
With VAAPI it's working, and the full frame-processing time decreased by 50%.
No, they are not expensive, and (in the fallback case) we could just
import into a new GL context to have the data extracted correctly.
I'm currently trying to make it work again with software encoders (no VAAPI,
so the raw data has to be extracted). It works, since with kernel 4.6 i915
allows mmap, but as the textures are in a different format I get quite a lot of
garbage (they should be extracted with GL, which knows about these issues).
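The GL read-back path looks roughly like this (a sketch: it assumes the DRM prime has already been imported as a texture in a context that is current, as in the earlier import sketch, and skips framebuffer-completeness and error checks):

#include <GLES2/gl2.h>

/* Read the imported texture back as linear RGBA through the GPU, so the
 * driver untiles the data instead of us mmap()ing a tiled buffer. */
static void read_back_linear(GLuint tex, int w, int h, void *dst)
{
    GLuint fbo;

    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, tex, 0);
    glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, dst);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glDeleteFramebuffers(1, &fbo);
}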
Frediano