From: "Emilio G. Cota" <cota@braap.org>
To: qemu-devel@nongnu.org
Cc: "Aurelien Jarno" <aurelien@aurel32.net>,
	"Peter Maydell" <peter.maydell@linaro.org>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Laurent Vivier" <laurent@vivier.eu>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Mark Cave-Ayland" <mark.cave-ayland@ilande.co.uk>
Subject: [Qemu-devel] [PATCH v4 09/14] hardfloat: support float32/64 addition and subtraction
Date: Mon, 11 Jun 2018 21:48:55 -0400
Message-ID: <1528768140-17894-10-git-send-email-cota@braap.org>
In-Reply-To: <1528768140-17894-1-git-send-email-cota@braap.org>

Performance results (single and double precision) for fp-bench:

1. Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
- before:
add-single: 135.07 MFlops
add-double: 131.60 MFlops
sub-single: 130.04 MFlops
sub-double: 133.01 MFlops
- after:
add-single: 443.04 MFlops
add-double: 301.95 MFlops
sub-single: 411.36 MFlops
sub-double: 293.15 MFlops

2. ARM AArch64 A57 @ 2.4 GHz
- before:
add-single: 44.79 MFlops
add-double: 49.20 MFlops
sub-single: 44.55 MFlops
sub-double: 49.06 MFlops
- after:
add-single: 93.28 MFlops
add-double: 88.27 MFlops
sub-single: 91.47 MFlops
sub-double: 88.27 MFlops

3. IBM POWER8E @ 2.1 GHz
- before:
add-single: 72.59 MFlops
add-double: 72.27 MFlops
sub-single: 75.33 MFlops
sub-double: 70.54 MFlops
- after:
add-single: 112.95 MFlops
add-double: 201.11 MFlops
sub-single: 116.80 MFlops
sub-double: 188.72 MFlops

Note that the IBM and ARM machines benefit from having
QEMU_HARDFLOAT_2F{32,64}_USE_FP set to 0. Otherwise their performance
can suffer significantly:
- IBM Power8:
add-single: [1] 54.94 vs [0] 116.37 MFlops
add-double: [1] 58.92 vs [0] 201.44 MFlops
- AArch64 A57:
add-single: [1] 80.72 vs [0] 93.24 MFlops
add-double: [1] 82.10 vs [0] 88.18 MFlops

On the Intel machine, setting 2F64 to 1 pays off, but setting
2F32 to 1 does not (a rough sketch of what this knob selects
follows the numbers below):
- Intel i7-6700K:
add-single: [1] 285.79 vs [0] 426.70 MFlops
add-double: [1] 302.15 vs [0] 278.82 MFlops
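
To make the difference concrete, here is a rough, self-contained sketch
(not QEMU code; the sf32 type and helper names are invented for
illustration) of the two ways an input can be classified as "zero or
normal" before taking the host-FP fast path. With the knob set to 1 the
check runs on host float/double values via fpclassify(); with it set to
0 it runs as bit tests on the soft-float representation, as the f32/f64
helpers in the patch below do. Which variant is faster is
machine-dependent, which is what the numbers above show.

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t sf32; /* stand-in for QEMU's float32 (IEEE-754 binary32) */

/*
 * USE_FP == 0 style: classify via bit tests on the binary32 encoding.
 * "Zero or normal" means: exponent != 0xff (not Inf/NaN), and not a
 * denormal (exponent == 0 with a non-zero fraction).
 */
static bool zon_bits(sf32 a)
{
    uint32_t exp = (a >> 23) & 0xff;
    uint32_t frac = a & 0x7fffff;

    return exp != 0xff && (exp != 0 || frac == 0);
}

/*
 * USE_FP == 1 style: reinterpret the bits as a host float and let the
 * C library classify it.
 */
static bool zon_host(sf32 a)
{
    float f;

    memcpy(&f, &a, sizeof(f));
    return fpclassify(f) == FP_ZERO || fpclassify(f) == FP_NORMAL;
}

int main(void)
{
    sf32 x = 0x3f800000; /* 1.0f */

    printf("bits: %d  host: %d\n", zon_bits(x), zon_host(x));
    return 0;
}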

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 fpu/softfloat.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 98 insertions(+), 8 deletions(-)
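
Note for reviewers: the new float32_addsub/float64_addsub wrappers below
plug into the generic f32/f64_gen2 and float/double_gen2 helpers from the
previous patch, which only take the host-FP fast path when float_status
allows it (roughly: round-to-nearest-even with the inexact flag already
raised) and when the per-operation input checks pass. A conceptual,
self-contained sketch of that dispatch shape follows; it is not the code
in this series, and every non-libc name below is invented for
illustration.

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t sf32; /* stand-in for QEMU's float32 */

/* Minimal stand-in for the bits of float_status the sketch needs. */
typedef struct {
    bool round_to_nearest_even;
    bool inexact_already_set;
} sketch_status;

static float f32_to_host(sf32 a)
{
    float f;

    memcpy(&f, &a, sizeof(f));
    return f;
}

static sf32 host_to_f32(float f)
{
    sf32 a;

    memcpy(&a, &f, sizeof(a));
    return a;
}

static bool zero_or_normal(sf32 x)
{
    int c = fpclassify(f32_to_host(x));

    return c == FP_ZERO || c == FP_NORMAL;
}

typedef float (*hard_op2)(float a, float b);
typedef sf32  (*soft_op2)(sf32 a, sf32 b, const sketch_status *s);
typedef bool  (*op_check)(sf32 a, sf32 b, const sketch_status *s);

/* Per-op check for add/sub: a both-zero input pair goes to softfloat. */
static bool addsub_inputs_ok(sf32 a, sf32 b, const sketch_status *s)
{
    (void)s;
    return !((a & 0x7fffffffu) == 0 && (b & 0x7fffffffu) == 0);
}

static sf32 f32_dispatch(sf32 a, sf32 b, const sketch_status *s,
                         hard_op2 hard, soft_op2 soft, op_check op_ok)
{
    if (s->round_to_nearest_even && s->inexact_already_set &&
        zero_or_normal(a) && zero_or_normal(b) && op_ok(a, b, s)) {
        /* Fast path: do the arithmetic on the host FPU. */
        return host_to_f32(hard(f32_to_host(a), f32_to_host(b)));
    }
    /* Slow path: full soft-float implementation. */
    return soft(a, b, s);
}

static float host_add(float a, float b)
{
    return a + b;
}

/* Trivial stand-in for the real soft_float32_add(); enough for a demo. */
static sf32 soft_add_stub(sf32 a, sf32 b, const sketch_status *s)
{
    (void)s;
    return host_to_f32(f32_to_host(a) + f32_to_host(b));
}

static sf32 sketch_float32_add(sf32 a, sf32 b, const sketch_status *s)
{
    return f32_dispatch(a, b, s, host_add, soft_add_stub, addsub_inputs_ok);
}

int main(void)
{
    sketch_status s = { .round_to_nearest_even = true,
                        .inexact_already_set = true };
    sf32 one = host_to_f32(1.0f);
    sf32 two = host_to_f32(2.0f);

    printf("1 + 2 = %f\n", f32_to_host(sketch_float32_add(one, two, &s)));
    return 0;
}

In the patch, float32_add/float32_sub (and the float64 versions) are just
such thin wrappers: they pass float_add/float_sub as the hard op and
soft_float32_{add,sub} as the fallback, with the zero-or-normal pre-check
and the addsub_post input check supplied to the generic helper.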

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 4d378d7..cdce6b2 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1077,8 +1077,8 @@ float16  __attribute__((flatten)) float16_add(float16 a, float16 b,
     return float16_round_pack_canonical(pr, status);
 }
 
-float32 __attribute__((flatten)) float32_add(float32 a, float32 b,
-                                             float_status *status)
+static float32 QEMU_SOFTFLOAT_ATTR
+soft_float32_add(float32 a, float32 b, float_status *status)
 {
     FloatParts pa = float32_unpack_canonical(a, status);
     FloatParts pb = float32_unpack_canonical(b, status);
@@ -1087,8 +1087,8 @@ float32 __attribute__((flatten)) float32_add(float32 a, float32 b,
     return float32_round_pack_canonical(pr, status);
 }
 
-float64 __attribute__((flatten)) float64_add(float64 a, float64 b,
-                                             float_status *status)
+static float64 QEMU_SOFTFLOAT_ATTR
+soft_float64_add(float64 a, float64 b, float_status *status)
 {
     FloatParts pa = float64_unpack_canonical(a, status);
     FloatParts pb = float64_unpack_canonical(b, status);
@@ -1107,8 +1107,8 @@ float16 __attribute__((flatten)) float16_sub(float16 a, float16 b,
     return float16_round_pack_canonical(pr, status);
 }
 
-float32 __attribute__((flatten)) float32_sub(float32 a, float32 b,
-                                             float_status *status)
+static float32 QEMU_SOFTFLOAT_ATTR
+soft_float32_sub(float32 a, float32 b, float_status *status)
 {
     FloatParts pa = float32_unpack_canonical(a, status);
     FloatParts pb = float32_unpack_canonical(b, status);
@@ -1117,8 +1117,8 @@ float32 __attribute__((flatten)) float32_sub(float32 a, float32 b,
     return float32_round_pack_canonical(pr, status);
 }
 
-float64 __attribute__((flatten)) float64_sub(float64 a, float64 b,
-                                             float_status *status)
+static float64 QEMU_SOFTFLOAT_ATTR
+soft_float64_sub(float64 a, float64 b, float_status *status)
 {
     FloatParts pa = float64_unpack_canonical(a, status);
     FloatParts pb = float64_unpack_canonical(b, status);
@@ -1127,6 +1127,96 @@ float64 __attribute__((flatten)) float64_sub(float64 a, float64 b,
     return float64_round_pack_canonical(pr, status);
 }
 
+static float float_add(float a, float b)
+{
+    return a + b;
+}
+
+static float float_sub(float a, float b)
+{
+    return a - b;
+}
+
+static double double_add(double a, double b)
+{
+    return a + b;
+}
+
+static double double_sub(double a, double b)
+{
+    return a - b;
+}
+
+static bool f32_addsub_post(float32 a, float32 b, const struct float_status *s)
+{
+    return !(float32_is_zero(a) && float32_is_zero(b));
+}
+
+static bool
+float_addsub_post(float a, float b, const struct float_status *s)
+{
+    return !(fpclassify(a) == FP_ZERO && fpclassify(b) == FP_ZERO);
+}
+
+static bool f64_addsub_post(float64 a, float64 b, const struct float_status *s)
+{
+    return !(float64_is_zero(a) && float64_is_zero(b));
+}
+
+static bool
+double_addsub_post(double a, double b, const struct float_status *s)
+{
+    return !(fpclassify(a) == FP_ZERO && fpclassify(b) == FP_ZERO);
+}
+
+static float32 float32_addsub(float32 a, float32 b, float_status *s,
+                              float_op2_func_t hard, f32_op2_func_t soft)
+{
+    if (QEMU_HARDFLOAT_2F32_USE_FP) {
+        return float_gen2(a, b, s, hard, soft, float_is_zon2, float_addsub_post,
+                          NULL, NULL);
+    } else {
+        return f32_gen2(a, b, s, hard, soft, f32_is_zon2, f32_addsub_post,
+                        NULL, NULL);
+    }
+}
+
+static float64 float64_addsub(float64 a, float64 b, float_status *s,
+                              double_op2_func_t hard, f64_op2_func_t soft)
+{
+    if (QEMU_HARDFLOAT_2F64_USE_FP) {
+        return double_gen2(a, b, s, hard, soft, double_is_zon2,
+                           double_addsub_post, NULL, NULL);
+    } else {
+        return f64_gen2(a, b, s, hard, soft, f64_is_zon2, f64_addsub_post,
+                        NULL, NULL);
+    }
+}
+
+float32 __attribute__((flatten))
+float32_add(float32 a, float32 b, float_status *s)
+{
+    return float32_addsub(a, b, s, float_add, soft_float32_add);
+}
+
+float32 __attribute__((flatten))
+float32_sub(float32 a, float32 b, float_status *s)
+{
+    return float32_addsub(a, b, s, float_sub, soft_float32_sub);
+}
+
+float64 __attribute__((flatten))
+float64_add(float64 a, float64 b, float_status *s)
+{
+    return float64_addsub(a, b, s, double_add, soft_float64_add);
+}
+
+float64 __attribute__((flatten))
+float64_sub(float64 a, float64 b, float_status *s)
+{
+    return float64_addsub(a, b, s, double_sub, soft_float64_sub);
+}
+
 /*
  * Returns the result of multiplying the floating-point values `a' and
  * `b'. The operation is performed according to the IEC/IEEE Standard
-- 
2.7.4
