* [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
[not found] <cover.1458134357.git.mpetlan@redhat.com>
@ 2016-03-16 13:50 ` Michael Petlan
2016-03-27 12:23 ` Jiri Olsa
` (5 more replies)
2016-03-16 13:51 ` [PATCH 2/9] perf test: adding new testsuite: common files Michael Petlan
` (7 subsequent siblings)
8 siblings, 6 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:50 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
In order to be able to run the bash/subcommand-based testsuite directly
from perf test, a new perf-test entry has been created for it.
The driver (suite.c) runs all the tests that it finds in the testsuite
directory: tools/perf/testsuite/ (or rather ./testsuite, since it expects
to be run from the perf directory).
In case the testsuite is not found, the testcase is skipped. If this is
not the desired behaviour, some packaging will be needed.
Usage:
./perf test suite
or
./perf test suite -v
./perf test suite -vv
(for the verbose output)
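For orientation, the driver's behaviour roughly corresponds to the
following shell loop (an illustration only, not part of the patch; the
real driver additionally exports TESTLOG_VERBOSITY and PERFSUITE_RUN_DIR
and counts the failures):

    cd tools/perf/testsuite || exit        # missing directory => the testcase is skipped
    for group in base_*; do
        (
            cd "$group" || exit
            [ -x setup.sh ] && ./setup.sh      # optional per-group setup
            for t in test_*.sh; do ./"$t"; done
            [ -x cleanup.sh ] && ./cleanup.sh  # optional per-group cleanup
        )
    done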
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/tests/Build | 1 +
tools/perf/tests/builtin-test.c | 4 ++
tools/perf/tests/suite.c | 150 ++++++++++++++++++++++++++++++++++++++++
tools/perf/tests/suite.h | 30 ++++++++
tools/perf/tests/tests.h | 1 +
5 files changed, 186 insertions(+)
create mode 100644 tools/perf/tests/suite.c
create mode 100644 tools/perf/tests/suite.h
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 1ba628e..c26fd8b 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -37,6 +37,7 @@ perf-y += topology.o
perf-y += cpumap.o
perf-y += stat.o
perf-y += event_update.o
+perf-y += suite.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index f2b1dca..14246a1 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -204,6 +204,10 @@ static struct test generic_tests[] = {
.func = test__event_update,
},
{
+ .desc = "Testsuite",
+ .func = test__suite,
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/tests/suite.c b/tools/perf/tests/suite.c
new file mode 100644
index 0000000..17f6610
--- /dev/null
+++ b/tools/perf/tests/suite.c
@@ -0,0 +1,150 @@
+#include "suite.h"
+
+#define TESTSUITE_ROOT "./testsuite/"
+
+/* globals */
+int fatal_occured = 0;
+char *cwd;
+
+
+/* runs a shell script */
+int _run_shell(const char *script)
+{
+ int ret;
+ char *cmd = malloc(strlen(script) + 3 * sizeof(char));
+ strcpy(cmd, "./");
+ strcpy(cmd + 2, script);
+ ret = system(cmd);
+ if (ret == -1) {
+ fprintf(stderr, "FATAL: Could not run %s", cmd);
+ fatal_occured++;
+ return 1;
+ }
+ return ret;
+}
+
+
+/* checks for existence of a file and runs it */
+int run_shell(const char *script)
+{
+ struct stat sb;
+
+ if (stat(script, &sb) == -1) {
+ fatal_occured++;
+ return 1;
+ }
+
+ if (! (sb.st_mode & (S_IXUSR | S_IFREG))) {
+ fatal_occured++;
+ return 1;
+ }
+
+ return _run_shell(script);
+}
+
+
+/* if a script is available, run it, otherwise ignore it */
+int try_shell(const char *script)
+{
+ struct stat sb;
+
+ if (stat(script, &sb) == -1)
+ return 0;
+
+ if (! (sb.st_mode & (S_IXUSR | S_IFREG)))
+ return 0;
+
+ return _run_shell(script);
+}
+
+
+/* runs a group of tests ("base_something", ...) */
+int run_group(const char *path)
+{
+ DIR *dp;
+ struct dirent *ep;
+ int ret;
+
+ int failures = 0;
+ ret = chdir(path);
+
+ if (verbose)
+ printf("======== %s ========\n", path);
+
+ /* try to run setup */
+ failures += try_shell("setup.sh");
+
+ /* scan the dir and run tests */
+ dp = opendir("./");
+ if (dp != NULL) {
+ while ((ep = readdir(dp))) {
+ if (strncmp(ep->d_name, "test_", 5))
+ continue;
+ failures += run_shell(ep->d_name);
+ }
+ closedir(dp);
+ }
+ else
+ perror("Cannot open inner dir.");
+
+ /* try to do clean-up */
+ try_shell("cleanup.sh");
+
+ ret = chdir("..");
+ ret = ret;
+ if (verbose)
+ printf("\n");
+ return failures;
+}
+
+
+/* main test */
+int test__suite(int subtest __maybe_unused)
+{
+ DIR *dp;
+ struct dirent *ep;
+ int failures = 0;
+ int ret;
+ int test_status = TEST_OK;
+ char verbosity_str[2];
+ int _verbose = (verbose > 9)? 9 : verbose;
+ char tempdir_template[24], _tempdir_template;
+
+ /* set verbosity according to perf-test command line switches */
+ snprintf(verbosity_str, 2, "%i", _verbose);
+ setenv("TESTLOG_VERBOSITY", verbosity_str, 1);
+
+ /* always place all temp files and logs elsewhere */
+ strcpy(tempdir_template, "/tmp/perfsuite-XXXXXX");
+ _tempdir_template = mkdtemp(tempdir_template);
+ setenv("PERFSUITE_RUN_DIR", _tempdir_template, 1);
+
+ cwd = getcwd(NULL, 0);
+ ret = chdir(TESTSUITE_ROOT);
+ if (ret != 0) {
+ free(cwd);
+ return TEST_SKIP;
+ }
+
+ dp = opendir("./");
+ if (dp != NULL) {
+
+ while ((ep = readdir(dp))) {
+ if (strncmp(ep->d_name, "base_", 5))
+ continue;
+ failures += run_group(ep->d_name);
+ }
+ closedir(dp);
+ }
+ else
+ test_status = TEST_SKIP;
+
+ ret = chdir(cwd);
+ ret = ret;
+ free(cwd);
+
+ if(failures || fatal_occured)
+ test_status = TEST_FAIL;
+
+ return test_status;
+}
diff --git a/tools/perf/tests/suite.h b/tools/perf/tests/suite.h
new file mode 100644
index 0000000..53c768b
--- /dev/null
+++ b/tools/perf/tests/suite.h
@@ -0,0 +1,30 @@
+#ifndef PERF_TEST_SUITE_H
+#define PERF_TEST_SUITE_H
+
+#include <linux/compiler.h>
+#include "tests.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <string.h>
+
+
+/* verbosity */
+extern int verbose;
+
+/* runs a shell script */
+int _run_shell(const char *script);
+
+/* checks for existence of a file and runs it */
+int run_shell(const char *script);
+
+/* if a script is available, run it, otherwise ignore it */
+int try_shell(const char *script);
+
+/* runs a group of tests ("base_something", ...) */
+int run_group(const char *path);
+
+#endif
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 82b2b5e..af516de 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -85,6 +85,7 @@ int test__synthesize_stat_config(int subtest);
int test__synthesize_stat(int subtest);
int test__synthesize_stat_round(int subtest);
int test__event_update(int subtest);
+int test__suite(int subtest);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
--
1.8.3.1
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
@ 2016-03-27 12:23 ` Jiri Olsa
2016-03-29 12:57 ` Michael Petlan
2016-03-27 12:23 ` Jiri Olsa
` (4 subsequent siblings)
5 siblings, 1 reply; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:23 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
SNIP
> +#define TESTSUITE_ROOT "./testsuite/"
> +
> +/* globals */
> +int fatal_occured = 0;
> +char *cwd;
> +
> +
> +/* runs a shell script */
> +int _run_shell(const char *script)
> +{
> + int ret;
> + char *cmd = malloc(strlen(script) + 3 * sizeof(char));
please check the return value
jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-27 12:23 ` Jiri Olsa
@ 2016-03-29 12:57 ` Michael Petlan
0 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-29 12:57 UTC (permalink / raw)
To: Jiri Olsa; +Cc: linux-perf-users, acme
[-- Attachment #1: Type: text/plain, Size: 2265 bytes --]
On Sun, 27 Mar 2016, Jiri Olsa wrote:
> On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
>
> SNIP
>
>> +#define TESTSUITE_ROOT "./testsuite/"
>> +
>> +/* globals */
>> +int fatal_occured = 0;
>> +char *cwd;
>> +
>> +
>> +/* runs a shell script */
>> +int _run_shell(const char *script)
>> +{
>> + int ret;
>> + char *cmd = malloc(strlen(script) + 3 * sizeof(char));
>
> please check the return value
Fixed in the attached patch.
To the other notes:
> 'cwd' does not need to be global
Fixed in the attached patch.
> you propage all the error all the way through, so what's
> the point of having extra global fatal_occured variable?
It is reserved for distinguishing between regular failures in the
tests (failures of perf itself, with the infrastructure working fine)
and failures of the infrastructure (e.g. not being able to allocate
memory for the *cmd string above).
if(failures || fatal_occurred)
test_status = TEST_FAIL;
I know that this idiom merges both into TEST_FAIL, but we are still
able to tell the difference here. The suite is pretty complex, so I
think there should be a way to notice infrastructure failures.
> also I think you could erase the ==== quoted lines
I think it is more human-eye-friendly when the sections are separated
by one more \n and the header. I moved that into the (verbose >= 2)
mode. Eventually there might be two or three times as many lines and
sections. When reviewing the results of the suite, I like the headers.
> is there a point to run tests if the setup fails?
There is, since not all of the tests of the 'group'
(base_something/test_*.sh) need the setup.
Thanks for the notes. The remaining two I have to check separately.
All the failures that I usually encounter have their stderr cut
out in -v, so the testsuite logs still look clean. I have to check
the failure you posted.
Regarding the C-c issue: yes, there are commands running in the background;
we need that e.g. for `perf stat -p` testing. However, I don't think
that is why it cannot be C-c'd. I have to look into it more.
Michael
>
> jirka
[-- Attachment #2: Type: text/plain, Size: 2007 bytes --]
diff --git a/tools/perf/tests/suite.c b/tools/perf/tests/suite.c
index 17f6610..b3310e8 100644
--- a/tools/perf/tests/suite.c
+++ b/tools/perf/tests/suite.c
@@ -3,8 +3,7 @@
#define TESTSUITE_ROOT "./testsuite/"
/* globals */
-int fatal_occured = 0;
-char *cwd;
+int fatal_occurred = 0;
/* runs a shell script */
@@ -12,12 +11,17 @@ int _run_shell(const char *script)
{
int ret;
char *cmd = malloc(strlen(script) + 3 * sizeof(char));
+ if(cmd == NULL)
+ {
+ fprintf(stderr, "FATAL: Could not allocate memory.\n");
+ return 1;
+ }
strcpy(cmd, "./");
strcpy(cmd + 2, script);
ret = system(cmd);
if (ret == -1) {
fprintf(stderr, "FATAL: Could not run %s", cmd);
- fatal_occured++;
+ fatal_occurred++;
return 1;
}
return ret;
@@ -30,12 +34,12 @@ int run_shell(const char *script)
struct stat sb;
if (stat(script, &sb) == -1) {
- fatal_occured++;
+ fatal_occurred++;
return 1;
}
if (! (sb.st_mode & (S_IXUSR | S_IFREG))) {
- fatal_occured++;
+ fatal_occurred++;
return 1;
}
@@ -68,7 +72,7 @@ int run_group(const char *path)
int failures = 0;
ret = chdir(path);
- if (verbose)
+ if (verbose > 1)
printf("======== %s ========\n", path);
/* try to run setup */
@@ -102,13 +106,14 @@ int run_group(const char *path)
int test__suite(int subtest __maybe_unused)
{
DIR *dp;
+ char *cwd;
struct dirent *ep;
int failures = 0;
int ret;
int test_status = TEST_OK;
char verbosity_str[2];
int _verbose = (verbose > 9)? 9 : verbose;
- char tempdir_template[24], _tempdir_template;
+ char tempdir_template[24], *_tempdir_template;
/* set verbosity according to perf-test command line switches */
snprintf(verbosity_str, 2, "%i", _verbose);
@@ -143,7 +148,7 @@ int test__suite(int subtest __maybe_unused)
ret = ret;
free(cwd);
- if(failures || fatal_occured)
+ if(failures || fatal_occurred)
test_status = TEST_FAIL;
return test_status;
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
2016-03-27 12:23 ` Jiri Olsa
@ 2016-03-27 12:23 ` Jiri Olsa
2016-03-27 12:24 ` Jiri Olsa
` (3 subsequent siblings)
5 siblings, 0 replies; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:23 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
SNIP
> diff --git a/tools/perf/tests/suite.c b/tools/perf/tests/suite.c
> new file mode 100644
> index 0000000..17f6610
> --- /dev/null
> +++ b/tools/perf/tests/suite.c
> @@ -0,0 +1,150 @@
> +#include "suite.h"
> +
> +#define TESTSUITE_ROOT "./testsuite/"
> +
> +/* globals */
> +int fatal_occured = 0;
> +char *cwd;
'cwd' does not need to be global
jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
2016-03-27 12:23 ` Jiri Olsa
2016-03-27 12:23 ` Jiri Olsa
@ 2016-03-27 12:24 ` Jiri Olsa
2016-03-27 12:24 ` Jiri Olsa
` (2 subsequent siblings)
5 siblings, 0 replies; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:24 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
SNIP
> + dp = opendir("./");
> + if (dp != NULL) {
> +
> + while ((ep = readdir(dp))) {
> + if (strncmp(ep->d_name, "base_", 5))
> + continue;
> + failures += run_group(ep->d_name);
> + }
> + closedir(dp);
> + }
> + else
> + test_status = TEST_SKIP;
> +
> + ret = chdir(cwd);
> + ret = ret;
> + free(cwd);
> +
> + if(failures || fatal_occured)
you propagate all the errors all the way through, so what's
the point of having an extra global fatal_occured variable?
thanks,
jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
` (2 preceding siblings ...)
2016-03-27 12:24 ` Jiri Olsa
@ 2016-03-27 12:24 ` Jiri Olsa
2016-03-29 12:19 ` Michael Petlan
2016-03-27 12:24 ` Jiri Olsa
2016-03-27 12:24 ` Jiri Olsa
5 siblings, 1 reply; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:24 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
> In order to be able to run the bash/subcommand-based testsuite directly
> from perf test a new perf-test entry has been created for it.
>
> The driver (suite.c) runs all the tests that it finds in the testsuite
> directory: tools/perf/testsuite/ (rather ./testsuite as it expects it
> it being run from the perf's directory).
>
> In case the testsuite is not found, the testcase is skipped. If this is
> not the desired behaviour, some packaging will be needed.
>
> Usage:
> ./perf test suite
>
> or
> ./perf test suite -v
> ./perf test suite -vv
>
> (for the verbose output)
>
> Signed-off-by: Michael Petlan <mpetlan@redhat.com>
got following compile error:
CC tests/suite.o
tests/suite.c: In function ‘test__suite’:
tests/suite.c:119:20: error: assignment makes integer from pointer without a cast [-Werror=int-conversion]
_tempdir_template = mkdtemp(tempdir_template);
^
tests/suite.c:120:30: error: passing argument 2 of ‘setenv’ makes pointer from integer without a cast [-Werror=int-conversion]
setenv("PERFSUITE_RUN_DIR", _tempdir_template, 1);
^
In file included from tests/suite.h:6:0,
from tests/suite.c:1:
/usr/include/stdlib.h:584:12: note: expected ‘const char *’ but argument is of type ‘char’
extern int setenv (const char *__name, const char *__value, int __replace)
^
cc1: all warnings being treated as errors
moving on with attached patch
thanks,
jirka
---
diff --git a/tools/perf/tests/suite.c b/tools/perf/tests/suite.c
index 17f66107f34d..1561bec3a2e1 100644
--- a/tools/perf/tests/suite.c
+++ b/tools/perf/tests/suite.c
@@ -108,7 +108,8 @@ int test__suite(int subtest __maybe_unused)
int test_status = TEST_OK;
char verbosity_str[2];
int _verbose = (verbose > 9)? 9 : verbose;
- char tempdir_template[24], _tempdir_template;
+ char tempdir_template[24];
+ char *_tempdir_template;
/* set verbosity according to perf-test command line switches */
snprintf(verbosity_str, 2, "%i", _verbose);
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-27 12:24 ` Jiri Olsa
@ 2016-03-29 12:19 ` Michael Petlan
0 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-29 12:19 UTC (permalink / raw)
To: Jiri Olsa; +Cc: linux-perf-users, acme
[-- Attachment #1: Type: text/plain, Size: 1920 bytes --]
On Sun, 27 Mar 2016, Jiri Olsa wrote:
>
> got following compile error:
>
> CC tests/suite.o
> tests/suite.c: In function ‘test__suite’:
> tests/suite.c:119:20: error: assignment makes integer from pointer without a cast [-Werror=int-conversion]
> _tempdir_template = mkdtemp(tempdir_template);
> ^
> tests/suite.c:120:30: error: passing argument 2 of ‘setenv’ makes pointer from integer without a cast [-Werror=int-conversion]
> setenv("PERFSUITE_RUN_DIR", _tempdir_template, 1);
> ^
> In file included from tests/suite.h:6:0,
> from tests/suite.c:1:
> /usr/include/stdlib.h:584:12: note: expected ‘const char *’ but argument is of type ‘char’
> extern int setenv (const char *__name, const char *__value, int __replace)
> ^
> cc1: all warnings being treated as errors
>
>
> moving on with attached patch
I noticed this error too, and that's why I sent the mail with the following subject:
"[PATCH 1/9] perf test: Adding an entry for the perftool-testsuite - CORRECT"
A stupid mistake, of course it must be a pointer.
Michael
>
> thanks,
> jirka
>
>
> ---
> diff --git a/tools/perf/tests/suite.c b/tools/perf/tests/suite.c
> index 17f66107f34d..1561bec3a2e1 100644
> --- a/tools/perf/tests/suite.c
> +++ b/tools/perf/tests/suite.c
> @@ -108,7 +108,8 @@ int test__suite(int subtest __maybe_unused)
> int test_status = TEST_OK;
> char verbosity_str[2];
> int _verbose = (verbose > 9)? 9 : verbose;
> - char tempdir_template[24], _tempdir_template;
> + char tempdir_template[24];
> + char *_tempdir_template;
>
> /* set verbosity according to perf-test command line switches */
> snprintf(verbosity_str, 2, "%i", _verbose);
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
` (3 preceding siblings ...)
2016-03-27 12:24 ` Jiri Olsa
@ 2016-03-27 12:24 ` Jiri Olsa
2016-03-29 12:19 ` Michael Petlan
2016-03-27 12:24 ` Jiri Olsa
5 siblings, 1 reply; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:24 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
SNIP
> +/* runs a group of tests ("base_something", ...) */
> +int run_group(const char *path)
> +{
> + DIR *dp;
> + struct dirent *ep;
> + int ret;
> +
> + int failures = 0;
> + ret = chdir(path);
> +
> + if (verbose)
> + printf("======== %s ========\n", path);
> +
> + /* try to run setup */
> + failures += try_shell("setup.sh");
> +
> + /* scan the dir and run tests */
> + dp = opendir("./");
> + if (dp != NULL) {
> + while ((ep = readdir(dp))) {
> + if (strncmp(ep->d_name, "test_", 5))
> + continue;
> + failures += run_shell(ep->d_name);
> + }
> + closedir(dp);
> + }
> + else
> + perror("Cannot open inner dir.");
> +
> + /* try to do clean-up */
> + try_shell("cleanup.sh");
> +
> + ret = chdir("..");
> + ret = ret;
hm? ;-)
jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-27 12:24 ` Jiri Olsa
@ 2016-03-29 12:19 ` Michael Petlan
2016-03-29 19:39 ` Jiri Olsa
0 siblings, 1 reply; 21+ messages in thread
From: Michael Petlan @ 2016-03-29 12:19 UTC (permalink / raw)
To: Jiri Olsa; +Cc: linux-perf-users, acme
On Sun, 27 Mar 2016, Jiri Olsa wrote:
> On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
>
> SNIP
>
>> + ret = chdir("..");
>> + ret = ret;
>
> hm? ;-)
That must be there, otherwise the compiler would complain about the unused
variable 'ret'. When 'ret' is removed entirely, the compiler complains about
ignoring the return value of 'chdir()'.
Well, ok, there could be something like:
if (ret)
{
	fprintf(stderr, "FATAL: Cannot cd to ..\n");
	return 1;
}
Do you think it is worth having there instead?
>
> jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-29 12:19 ` Michael Petlan
@ 2016-03-29 19:39 ` Jiri Olsa
0 siblings, 0 replies; 21+ messages in thread
From: Jiri Olsa @ 2016-03-29 19:39 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Tue, Mar 29, 2016 at 02:19:17PM +0200, Michael Petlan wrote:
> On Sun, 27 Mar 2016, Jiri Olsa wrote:
> >On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
> >
> >SNIP
> >
> >>+ ret = chdir("..");
> >>+ ret = ret;
> >
> >hm? ;-)
>
> That must be there, otherwise the compiler would complain about unused
> variable 'ret'. When removing 'ret' at all, the compiler complains about
> ignoring return value of 'chdir()'.
>
> Well, ok, there could be something like:
>
> if(!ret)
> {
> fprintf("FATAL: Cannot cd to ..\n");
> return 1;
> }
I don't see a point in having the ret variable at all then..
jirka
* Re: [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
` (4 preceding siblings ...)
2016-03-27 12:24 ` Jiri Olsa
@ 2016-03-27 12:24 ` Jiri Olsa
5 siblings, 0 replies; 21+ messages in thread
From: Jiri Olsa @ 2016-03-27 12:24 UTC (permalink / raw)
To: Michael Petlan; +Cc: linux-perf-users, acme
On Wed, Mar 16, 2016 at 02:50:59PM +0100, Michael Petlan wrote:
SNIP
> +/* runs a group of tests ("base_something", ...) */
> +int run_group(const char *path)
> +{
> + DIR *dp;
> + struct dirent *ep;
> + int ret;
> +
> + int failures = 0;
> + ret = chdir(path);
> +
> + if (verbose)
> + printf("======== %s ========\n", path);
> +
> + /* try to run setup */
> + failures += try_shell("setup.sh");
is there a point to run tests if the setup fails?
jirka
* [PATCH 2/9] perf test: adding new testsuite: common files
[not found] <cover.1458134357.git.mpetlan@redhat.com>
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
@ 2016-03-16 13:51 ` Michael Petlan
2016-03-16 13:54 ` [PATCH 3/9] perf test: new testsuite: perf annotate tests Michael Petlan
` (6 subsequent siblings)
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:51 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
A new testsuite based on black-box testing of perf's subcommands is
located in tools/perf/testsuite. It can be run from the perf directory
with "perf test suite".
The suite aims at testing the subcommands and their sanity from the
user's point of view. It is designed to be easily extensible when a
new feature needs to be covered.
This commit adds the base part of the testsuite (config, checkers):
common/
init.sh
- to be sourced within the test scripts
patterns.sh
- contains common regexp patterns
settings.sh
- sourced from init.sh
- contains various constants and settings
parametrization.sh
- sourced from settings.sh
- acts as a config file for enabling or disabling
some features, regulating the test depth, etc.
check_*.pl
- various scripts for checking output
- might later be extended with more advanced checking
tools and techniques
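To show how these pieces fit together, here is a sketch of a typical
test script built on top of them (modelled on the test scripts added
later in this series; the perf command, file names and comment strings
are only illustrative):

    #!/bin/bash
    . ../common/init.sh    # sources common/settings.sh and common/patterns.sh
    . ./settings.sh        # per-suite settings (TEST_NAME, LOGS_DIR, ...)

    THIS_TEST_NAME=`basename $0 .sh`
    TEST_RESULT=0

    # run the tested perf command, keeping its stderr for checking
    $CMD_PERF record -o perf.data -- $CMD_BASIC_SLEEP 2> record.log
    PERF_EXIT_CODE=$?

    # every line of the output must match one of the common patterns
    ../common/check_all_lines_matched.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < record.log
    CHECK_EXIT_CODE=$?

    print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "record data"
    (( TEST_RESULT += $? ))

    print_overall_results $TEST_RESULT
    exit $?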
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
.../testsuite/common/check_all_lines_matched.pl | 38 +++++++
.../testsuite/common/check_all_patterns_found.pl | 33 ++++++
.../testsuite/common/check_any_pattern_found.pl | 14 +++
.../testsuite/common/check_buildids_vs_files.pl | 31 ++++++
.../testsuite/common/check_errors_whitelisted.pl | 50 +++++++++
.../testsuite/common/check_exact_pattern_order.pl | 27 +++++
.../testsuite/common/check_kallsyms_vs_probes.pl | 61 +++++++++++
.../testsuite/common/check_no_patterns_found.pl | 33 ++++++
tools/perf/testsuite/common/init.sh | 67 ++++++++++++
tools/perf/testsuite/common/parametrization.sh | 39 +++++++
tools/perf/testsuite/common/patterns.sh | 119 +++++++++++++++++++++
tools/perf/testsuite/common/settings.sh | 57 ++++++++++
12 files changed, 569 insertions(+)
create mode 100755 tools/perf/testsuite/common/check_all_lines_matched.pl
create mode 100755 tools/perf/testsuite/common/check_all_patterns_found.pl
create mode 100755 tools/perf/testsuite/common/check_any_pattern_found.pl
create mode 100755 tools/perf/testsuite/common/check_buildids_vs_files.pl
create mode 100755 tools/perf/testsuite/common/check_errors_whitelisted.pl
create mode 100755 tools/perf/testsuite/common/check_exact_pattern_order.pl
create mode 100755 tools/perf/testsuite/common/check_kallsyms_vs_probes.pl
create mode 100755 tools/perf/testsuite/common/check_no_patterns_found.pl
create mode 100644 tools/perf/testsuite/common/init.sh
create mode 100644 tools/perf/testsuite/common/parametrization.sh
create mode 100644 tools/perf/testsuite/common/patterns.sh
create mode 100644 tools/perf/testsuite/common/settings.sh
diff --git a/tools/perf/testsuite/common/check_all_lines_matched.pl b/tools/perf/testsuite/common/check_all_lines_matched.pl
new file mode 100755
index 0000000..b3d9990
--- /dev/null
+++ b/tools/perf/testsuite/common/check_all_lines_matched.pl
@@ -0,0 +1,38 @@
+#!/usr/bin/perl
+
+@regexps = @ARGV;
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{ERROR_MESSAGE_MAX_LINES} if (defined $ENV{ERROR_MESSAGE_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/testsuite/common/check_all_patterns_found.pl b/tools/perf/testsuite/common/check_all_patterns_found.pl
new file mode 100755
index 0000000..392d3f8a
--- /dev/null
+++ b/tools/perf/testsuite/common/check_all_patterns_found.pl
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1; # FIXME: maybe add counters -- how many times was the regexp matched
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ unless (exists $found{$r})
+ {
+ print "Regexp not found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/testsuite/common/check_any_pattern_found.pl b/tools/perf/testsuite/common/check_any_pattern_found.pl
new file mode 100755
index 0000000..215c65a0c
--- /dev/null
+++ b/tools/perf/testsuite/common/check_any_pattern_found.pl
@@ -0,0 +1,14 @@
+#!/usr/bin/perl
+
+@regexps = @ARGV;
+
+while (<STDIN>)
+{
+ s/\n//;
+ for $r (@regexps)
+ {
+ exit 0 if (/$r/);
+ }
+}
+
+exit 1;
diff --git a/tools/perf/testsuite/common/check_buildids_vs_files.pl b/tools/perf/testsuite/common/check_buildids_vs_files.pl
new file mode 100755
index 0000000..777a5ff
--- /dev/null
+++ b/tools/perf/testsuite/common/check_buildids_vs_files.pl
@@ -0,0 +1,31 @@
+#!/usr/bin/perl
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+
+while (<STDIN>)
+{
+ chomp;
+ ($buildid_from_list, $filepath) = $_ =~ /^(\w{40})\s+((?:\/[\w\+.-]+)+|(?:\[kernel\.kallsyms\]))$/;
+ if ($filepath =~ /\[kernel\.kallsyms\]/)
+ {
+ $CMD_PERF = $ENV{'CMD_PERF'};
+ $buildid_from_file = `$CMD_PERF buildid-list -k`;
+ chomp $buildid_from_file;
+ }
+ else
+ {
+ $filecmd_output = `file $filepath 2>/dev/null`;
+ ($buildid_from_file) = $filecmd_output =~ /BuildID\[sha1\]=(\w{40})/;
+ }
+
+ if ($buildid_from_file ne $buildid_from_list)
+ {
+ $passed = 0;
+ print "$filepath has $buildid_from_file buildid but perf shows $buildid_from_list\n" unless $quiet;
+ }
+}
+
+exit !($passed);
diff --git a/tools/perf/testsuite/common/check_errors_whitelisted.pl b/tools/perf/testsuite/common/check_errors_whitelisted.pl
new file mode 100755
index 0000000..274bac8
--- /dev/null
+++ b/tools/perf/testsuite/common/check_errors_whitelisted.pl
@@ -0,0 +1,50 @@
+#!/usr/bin/perl
+
+$whitelist_file = shift;
+
+if (defined $whitelist_file)
+{
+ open (INFILE, $whitelist_file) or die "Checker error: Unable to open the whitelist file: $whitelist_file\n";
+ @regexps = <INFILE>;
+ close INFILE or die "Checker error: Unable to close the whitelist file: $whitelist_file\n";
+}
+else
+{
+ @regexps = ();
+}
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{ERROR_MESSAGE_MAX_LINES} if (defined $ENV{ERROR_MESSAGE_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ chomp $r;
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/testsuite/common/check_exact_pattern_order.pl b/tools/perf/testsuite/common/check_exact_pattern_order.pl
new file mode 100755
index 0000000..cd2c7ed
--- /dev/null
+++ b/tools/perf/testsuite/common/check_exact_pattern_order.pl
@@ -0,0 +1,27 @@
+#!/usr/bin/perl
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$r = shift @regexps;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ if (/$r/)
+ {
+ $r = shift @regexps;
+ }
+}
+
+if (defined $r)
+{
+ print "Pattern not found in the proper order: $r\n" unless $quiet;
+ exit 1;
+}
+
+exit 0;
diff --git a/tools/perf/testsuite/common/check_kallsyms_vs_probes.pl b/tools/perf/testsuite/common/check_kallsyms_vs_probes.pl
new file mode 100755
index 0000000..c463294
--- /dev/null
+++ b/tools/perf/testsuite/common/check_kallsyms_vs_probes.pl
@@ -0,0 +1,61 @@
+#!/usr/bin/perl
+
+$matched = 0;
+$missing = 0;
+$all = 0;
+
+$threshold = 85;
+
+$kallsyms_file = $ARGV[0];
+$kfuncs_file = $ARGV[1];
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+sub my_die
+{
+ my $msg = shift;
+ unless ($quiet)
+ {
+ print STDERR "$msg";
+ }
+ exit 1;
+}
+
+# load the kallsyms into a hash
+%kallsyms_hash = ();
+open (INFILE, $kallsyms_file) or my_die "ERROR: Unable to open $kallsyms_file.\n";
+@kallsyms_lines = <INFILE>;
+close INFILE or my_die "ERROR: Unable to close $kallsyms_file\n";
+
+for (@kallsyms_lines)
+{
+ chomp;
+ next unless /[\da-fA-F]+\s\w\s(\w+)/;
+
+ $kallsyms_hash{$1} = 1;
+}
+
+# check the kfuncs
+open (INFILE, $kfuncs_file) or my_die "ERROR: Unable to open $kfuncs_file\n";
+@kfuncs_lines = <INFILE>;
+close INFILE or my_die "ERROR: Unable to close $kfuncs_file\n";
+
+for (@kfuncs_lines)
+{
+ chomp;
+ if (exists $kallsyms_hash{$_})
+ {
+ $matched++;
+ }
+ else
+ {
+ $missing++;
+ }
+ $all++;
+}
+
+$rate = ($matched / $all) * 100;
+printf("%d%% matches\n", $rate) unless $quiet;
+
+exit !($rate > $threshold);
diff --git a/tools/perf/testsuite/common/check_no_patterns_found.pl b/tools/perf/testsuite/common/check_no_patterns_found.pl
new file mode 100755
index 0000000..52e6f8e
--- /dev/null
+++ b/tools/perf/testsuite/common/check_no_patterns_found.pl
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1;
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ if (exists $found{$r})
+ {
+ print "Regexp found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/testsuite/common/init.sh b/tools/perf/testsuite/common/init.sh
new file mode 100644
index 0000000..d1c695b
--- /dev/null
+++ b/tools/perf/testsuite/common/init.sh
@@ -0,0 +1,67 @@
+#
+# init.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file should be used for initialization of basic functions
+# for checking, reporting results etc.
+#
+#
+
+
+. ../common/settings.sh
+. ../common/patterns.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+_echo()
+{
+ test "$TESTLOG_VERBOSITY" -ne 0 && echo -e "$@"
+}
+
+print_results()
+{
+ PERF_RETVAL="$1"; shift
+ CHECK_RETVAL="$1"; shift
+ FAILURE_REASON=""
+ TASK_COMMENT="$@"
+ if [ $PERF_RETVAL -eq 0 -a $CHECK_RETVAL -eq 0 ]; then
+ _echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT"
+ return 0
+ else
+ if [ $PERF_RETVAL -ne 0 ]; then
+ FAILURE_REASON="command exitcode"
+ fi
+ if [ $CHECK_RETVAL -ne 0 ]; then
+ test -n "$FAILURE_REASON" && FAILURE_REASON="$FAILURE_REASON + "
+ FAILURE_REASON="$FAILURE_REASON""output regexp parsing"
+ fi
+ _echo "$MFAIL-- [ FAIL ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT ($FAILURE_REASON)"
+ return 1
+ fi
+}
+
+print_overall_results()
+{
+ RETVAL="$1"; shift
+ if [ $RETVAL -eq 0 ]; then
+ _echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
+ else
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ fi
+ return $RETVAL
+}
+
+print_testcase_skipped()
+{
+ TASK_COMMENT="$@"
+ _echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped"
+ return 0
+}
+
+print_overall_skipped()
+{
+ _echo "$MSKIP## [ SKIP ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME :: testcase skipped"
+ return 0
+}
diff --git a/tools/perf/testsuite/common/parametrization.sh b/tools/perf/testsuite/common/parametrization.sh
new file mode 100644
index 0000000..617f47e
--- /dev/null
+++ b/tools/perf/testsuite/common/parametrization.sh
@@ -0,0 +1,39 @@
+#
+# parametrization.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file configures the testcases how deeply they should
+# look at things. The parametrization allows you to use the suite
+# for both smoke testing and deeper testing.
+#
+
+#### general
+
+# If set, the '--help' outputs of the commands will be checked.
+# Since the '--help' option calls a manpage and manpages are not
+# always generated, you may skip the man-page checks.
+export PARAM_GENERAL_HELP_TEXT_CHECK=${PARAM_GENERAL_HELP_TEXT_CHECK:-n}
+
+
+#### perf_stat
+
+# If set, the 24x7 events will be tested on all available cores.
+# That might make it 'nproc' times longer. Basically it should be
+# enough to run each event on one core only.
+# Note: POWER8 only
+export PARAM_STAT_24x7_ALL_CORES=${PARAM_STAT_24x7_ALL_CORES:-n}
+
+# If set, all the tracepoint events will be checked for syntax
+# errors in their definition. This testcase may take a long time
+# and the checks are not that crucial, so it can be turned off
+# when you do not want to deep dive.
+export PARAM_STAT_TRACEPOINT_EVENTS_SYNTAX=${PARAM_STAT_TRACEPOINT_EVENTS_SYNTAX:-n}
+
+
+#### perf_trace
+
+# If set, some heavy load tests are run. That might be not always
+# desired behaviour.
+export PARAM_TRACE_OVERLOAD=${PARAM_TRACE_OVERLOAD:-y}
diff --git a/tools/perf/testsuite/common/patterns.sh b/tools/perf/testsuite/common/patterns.sh
new file mode 100644
index 0000000..eb86477
--- /dev/null
+++ b/tools/perf/testsuite/common/patterns.sh
@@ -0,0 +1,119 @@
+export RE_NUMBER="[0-9\.]+"
+# Number
+# Examples:
+# 123.456
+
+
+export RE_NUMBER_HEX="[0-9A-Fa-f]+"
+# Hexadecimal number
+# Examples:
+# 1234
+# a58d
+# aBcD
+# deadbeef
+
+
+export RE_PROCESS_PID="\w+\/\d+"
+# A process with PID
+# Example:
+# sleep/4102
+
+
+export RE_EVENT_ANY="[\w\-\:\/_=,]+"
+# Name of any event (universal)
+# Examples:
+# cpu-cycles
+# cpu/event=12,umask=34/
+# r41e1
+# nfs:nfs_getattr_enter
+
+
+export RE_EVENT="[\w\-:_]+"
+# Name of an usual event
+# Examples:
+# cpu-cycles
+
+
+export RE_EVENT_RAW="r$RE_NUMBER_HEX"
+# Specification of a raw event
+# Examples:
+# r41e1
+# r1a
+
+
+export RE_EVENT_CPU="cpu/(\w=""$RE_NUMBER_HEX"",?)+/p*" # FIXME
+# Specification of a CPU event
+# Examples:
+# cpu/event=12,umask=34/pp
+
+
+export RE_EVENT_UNCORE="uncore/[\w_]+/"
+# Specification of an uncore event
+# Examples:
+# uncore/qhl_request_local_reads/
+
+
+export RE_EVENT_SUBSYSTEM="[\w\-]+:[\w\-]+"
+# Name of an event from subsystem
+# Examples:
+# ext4:ext4_ordered_write_end
+# sched:sched_switch
+
+
+export RE_PATH="(?:\/[\w\+\.-]+)+"
+# A full filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# /usr/bin/mv
+
+
+export RE_LINE_COMMENT="^#.*"
+# A comment line
+# Examples:
+# # Started on Thu Sep 10 11:43:00 2015
+
+
+export RE_LINE_EMPTY="^\s*$"
+# An empty line with possible whitespaces
+# Examples:
+#
+
+
+export RE_LINE_RECORD1="^\[\s+perf\s+record:\s+Woken up $RE_NUMBER times? to write data\s+\].*$"
+# The first line of perf-record "OK" output
+# Examples:
+# [ perf record: Woken up 1 times to write data ]
+
+
+export RE_LINE_RECORD2="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+
+
+export RE_LINE_TRACE="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+=\s+\-?$RE_NUMBER|$RE_NUMBER_HEX.*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): sleep/4102 mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+
+export RE_LINE_TRACE_SUMMARY_HEADER="\s*syscall\s+calls\s+total\s+min\s+avg\s+max\s+stddev"
+# A header of a perf-trace summary table
+# Example:
+# syscall calls total min avg max stddev
+
+
+export RE_LINE_TRACE_SUMMARY_CONTENT="\s*\w+\s+(?:$RE_NUMBER\s+){5}$RE_NUMBER%"
+# A line of a perf-trace summary table
+# Example:
+# open 3 0.017 0.005 0.006 0.007 10.90%
+
+
+export RE_LINE_REPORT_CONTENT="^\s+$RE_NUMBER%\s+\w+\s+\S+\s+\S+\s+\S+" # FIXME
+# A line from typical perf report --stdio output
+# Example:
+# 100.00% sleep [kernel.vmlinux] [k] syscall_return_slowpath
diff --git a/tools/perf/testsuite/common/settings.sh b/tools/perf/testsuite/common/settings.sh
new file mode 100644
index 0000000..68eb2f0
--- /dev/null
+++ b/tools/perf/testsuite/common/settings.sh
@@ -0,0 +1,57 @@
+#
+# settings.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file contains global settings for the whole testsuite.
+# Its purpose is to make it easier when it is necessary i.e. to
+# change the usual sample command which is used in all of the tests
+# in many files.
+#
+# This file is intended to be sourced in the tests.
+#
+
+#### which perf to use in the testing
+export CMD_PERF=${CMD_PERF:-`which perf`}
+
+#### basic programs examined by perf
+export CMD_BASIC_SLEEP="sleep 0.1"
+export CMD_QUICK_SLEEP="sleep 0.01"
+export CMD_LONGER_SLEEP="sleep 2"
+export CMD_SIMPLE="true"
+
+#### common settings
+export TESTLOG_VERBOSITY=${TESTLOG_VERBOSITY:-0}
+export ERROR_MESSAGE_MAX_LINES=${ERROR_MESSAGE_MAX_LINES:-20}
+
+#### clear locale
+export LC_ALL=C
+
+#### colors
+if [ -t 1 ]; then
+ export MPASS="\e[32m"
+ export MALLPASS="\e[1;32m"
+ export MFAIL="\e[31m"
+ export MALLFAIL="\e[1;31m"
+ export MWARN="\e[1;35m"
+ export MSKIP="\e[33m"
+ export MHIGH="\e[1;33m"
+ export MEND="\e[m"
+else
+ export MPASS=""
+ export MALLPASS=""
+ export MFAIL=""
+ export MALLFAIL=""
+ export MWARN=""
+ export MSKIP=""
+ export MHIGH=""
+ export MEND=""
+fi
+
+
+#### test parametrization
+if [ ! -d ./common ]; then
+ # FIXME nasty hack
+ . ../common/parametrization.sh
+fi
--
1.8.3.1
* [PATCH 3/9] perf test: new testsuite: perf annotate tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
2016-03-16 13:50 ` [PATCH 1/9] perf test: Adding an entry for the perftool-testsuite Michael Petlan
2016-03-16 13:51 ` [PATCH 2/9] perf test: adding new testsuite: common files Michael Petlan
@ 2016-03-16 13:54 ` Michael Petlan
2016-03-16 13:55 ` [PATCH 4/9] perf test: new testsuite: perf buildid-list tests Michael Petlan
` (5 subsequent siblings)
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:54 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
This commit adds tests for the perf annotate tool.
The structure of the base_something dirs is the following:
base_something/
settings.sh
- a script meant to be sourced within the tests
setup.sh
- if present, necessary for setup of the subset
test_*.sh
- various tests of the subset
cleanup.sh
- a cleanup script that should remove logs, etc.
All the tests should be stand-alone, so if needed, it is enough to
cd to the proper base_* directory and run the test. Sometimes the
setup script needs to be run first.
Example:
cd testsuite/base_annotate
./setup.sh
./test_basic.sh
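Note that with the default TESTLOG_VERBOSITY=0 (see common/settings.sh)
such a stand-alone run is silent; a verbose run would look roughly like
this (the PASS line only illustrates the format printed by print_results):

    cd tools/perf/testsuite/base_annotate
    export TESTLOG_VERBOSITY=2   # 2 also makes the checkers print details
    ./setup.sh                   # builds examples/load and records perf.data while running it
    ./test_basic.sh              # prints e.g. "-- [ PASS ] -- perf_annotate :: test_basic :: basic execution - annotate"
    ./cleanup.sh                 # removes the logs, perf.data and the built example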
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/testsuite/base_annotate/cleanup.sh | 24 +++
.../perf/testsuite/base_annotate/examples/Makefile | 16 ++
tools/perf/testsuite/base_annotate/examples/load.c | 24 +++
tools/perf/testsuite/base_annotate/settings.sh | 31 ++++
tools/perf/testsuite/base_annotate/setup.sh | 35 +++++
tools/perf/testsuite/base_annotate/test_basic.sh | 169 +++++++++++++++++++++
6 files changed, 299 insertions(+)
create mode 100755 tools/perf/testsuite/base_annotate/cleanup.sh
create mode 100644 tools/perf/testsuite/base_annotate/examples/Makefile
create mode 100644 tools/perf/testsuite/base_annotate/examples/load.c
create mode 100644 tools/perf/testsuite/base_annotate/settings.sh
create mode 100755 tools/perf/testsuite/base_annotate/setup.sh
create mode 100755 tools/perf/testsuite/base_annotate/test_basic.sh
diff --git a/tools/perf/testsuite/base_annotate/cleanup.sh b/tools/perf/testsuite/base_annotate/cleanup.sh
new file mode 100755
index 0000000..dd66675
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/cleanup.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf annotate test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+make -s -C examples clean
+find . -name \*.log | xargs -r rm
+find . -name \*.err | xargs -r rm
+rm -f perf.data*
+
+print_overall_results 0
+exit $?
diff --git a/tools/perf/testsuite/base_annotate/examples/Makefile b/tools/perf/testsuite/base_annotate/examples/Makefile
new file mode 100644
index 0000000..3248b6c
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/examples/Makefile
@@ -0,0 +1,16 @@
+CC=gcc
+CFLAGS=-g -O0
+
+SRCS = $(wildcard *.c)
+PROGS = $(patsubst %.c,%,$(SRCS))
+
+all: $(PROGS)
+ifneq "$(MAKE_TARGET_DIR)x" 'x'
+ mv $(PROGS) $(MAKE_TARGET_DIR)/
+endif
+
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $<
+
+clean:
+ rm -f $(PROGS)
diff --git a/tools/perf/testsuite/base_annotate/examples/load.c b/tools/perf/testsuite/base_annotate/examples/load.c
new file mode 100644
index 0000000..c8f30d1
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/examples/load.c
@@ -0,0 +1,24 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+
+int main (int argc, char *argv[])
+{
+ long from, i, j = 20L;
+
+ if (argc > 1)
+ from = atol (argv[1]);
+ else
+ from = 20L;
+
+ for (i = 1L; j; ++i)
+ {
+ for (j = from; j > 0L; --j)
+ if (i % j)
+ break;
+ }
+
+ printf ("%ld\n", --i);
+
+ return 0;
+}
diff --git a/tools/perf/testsuite/base_annotate/settings.sh b/tools/perf/testsuite/base_annotate/settings.sh
new file mode 100644
index 0000000..398ed27
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/settings.sh
@@ -0,0 +1,31 @@
+#
+# settings.sh of perf_annotate test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_annotate"
+export MY_ARCH=`arch`
+export MY_HOSTNAME=`hostname`
+export MY_KERNEL_VERSION=`uname -r`
+export MY_CPUS_ONLINE=`nproc`
+export MY_CPUS_AVAILABLE=`cat /proc/cpuinfo | grep -P "processor\s" | wc -l`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
diff --git a/tools/perf/testsuite/base_annotate/setup.sh b/tools/perf/testsuite/base_annotate/setup.sh
new file mode 100755
index 0000000..54b1e17
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/setup.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+#
+# setup.sh of perf annotate test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# FIXME - build C program
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+make -s -C examples
+print_results $? 0 "building the example code"
+TEST_RESULT=$?
+
+# record some data
+$CMD_PERF record -o $CURRENT_TEST_DIR/perf.data $CURRENT_TEST_DIR/examples/load > /dev/null 2> $LOGS_DIR/setup_record.log
+PERF_EXIT_CODE=$?
+
+# check the perf record output
+../common/check_all_lines_matched.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup_record.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "record data"
+(( TEST_RESULT += $? ))
+
+print_overall_results $TEST_RESULT
+exit $?
diff --git a/tools/perf/testsuite/base_annotate/test_basic.sh b/tools/perf/testsuite/base_annotate/test_basic.sh
new file mode 100755
index 0000000..196c38b
--- /dev/null
+++ b/tools/perf/testsuite/base_annotate/test_basic.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+
+#
+# test_basic of perf annotate test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests basic functionality of perf annotate command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF annotate --help > $LOGS_DIR/basic_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-ANNOTATE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "perf\-annotate \- Read perf.data .* display annotated code" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "input" "dsos" "symbol" "force" "verbose" "dump-raw-trace" "vmlinux" "modules" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "print-line" "full-paths" "stdio" "tui" "cpu" "source" "symfs" "disassembler-style" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "objdump" "skip-missing" "group" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### basic execution
+
+# annotate...
+( cd $CURRENT_TEST_DIR ; $CMD_PERF annotate --stdio > $LOGS_DIR/basic_annotate.log 2> $LOGS_DIR/basic_annotate.err )
+PERF_EXIT_CODE=$?
+
+# check the annotate output; default option means both source and assembly
+REGEX_HEADER="Percent.*Source code.*Disassembly\sof"
+REGEX_LINE="$RE_NUMBER\s+:\s+$RE_NUMBER_HEX\s*:.*"
+REGEX_SECTION__TEXT="Disassembly of section \.text:"
+# check for the basic structure
+../common/check_all_patterns_found.pl "$REGEX_HEADER load" "$REGEX_LINE" "$REGEX_SECTION__TEXT" < $LOGS_DIR/basic_annotate.log
+CHECK_EXIT_CODE=$?
+# check for the source code presence
+../common/check_all_patterns_found.pl "main" "from = atol" "from = 20L;" "for\s*\(i = 1L; j; \+\+i\)" "return 0;" < $LOGS_DIR/basic_annotate.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution - annotate"
+(( TEST_RESULT += $? ))
+
+
+### dso filter
+
+# '--dso SOME_DSO' limits the annotation to SOME_DSO only
+( cd $CURRENT_TEST_DIR ; $CMD_PERF annotate --stdio --dso load > $LOGS_DIR/basic_dso.log 2> $LOGS_DIR/basic_dso.err )
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$REGEX_HEADER load" "$REGEX_LINE" "$REGEX_SECTION__TEXT" < $LOGS_DIR/basic_dso.log
+CHECK_EXIT_CODE=$?
+# check for the source code presence
+../common/check_all_patterns_found.pl "main\s*\(" "from = atol" "from = 20L;" "for\s*\(i = 1L; j; \+\+i\)" "return 0;" < $LOGS_DIR/basic_dso.log
+(( CHECK_EXIT_CODE += $? ))
+# check whether the '--dso' option cuts the output to one dso only
+test `grep -c "Disassembly" $LOGS_DIR/basic_dso.log` -ge 2 # FIXME wrong
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dso filter"
+(( TEST_RESULT += $? ))
+
+
+### no-source
+
+# '--no-source' should show only the assembly code
+( cd $CURRENT_TEST_DIR ; $CMD_PERF annotate --stdio --no-source --dso load > $LOGS_DIR/basic_nosource.log 2> $LOGS_DIR/basic_nosource.err )
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$REGEX_HEADER load" "$REGEX_LINE" "$REGEX_SECTION__TEXT" < $LOGS_DIR/basic_nosource.log
+CHECK_EXIT_CODE=$?
+# the C source should not be there
+../common/check_no_patterns_found.pl "from = atol" "from = 20L;" "for\s*\(i = 1L; j; \+\+i\)" "return 0;" < $LOGS_DIR/basic_nosource.log
+(( CHECK_EXIT_CODE += $? ))
+# check whether the '--dso' option cuts the output to one dso only
+test `grep -c "Disassembly" $LOGS_DIR/basic_nosource.log` -ge 2 # FIXME wrong
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "no-source"
+(( TEST_RESULT += $? ))
+
+
+### full-paths
+
+# '-P' should print full paths of DSOs
+( cd $CURRENT_TEST_DIR ; $CMD_PERF annotate --stdio --dso load -P > $LOGS_DIR/basic_fullpaths.log 2> $LOGS_DIR/basic_fullpaths.err )
+PERF_EXIT_CODE=$?
+
+FULLPATH=`readlink -f $CURRENT_TEST_DIR/examples`
+../common/check_all_patterns_found.pl "$REGEX_HEADER $FULLPATH/load" "$REGEX_LINE" "$REGEX_SECTION__TEXT" < $LOGS_DIR/basic_fullpaths.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "full-paths"
+(( TEST_RESULT += $? ))
+
+
+### print-line
+
+# '--print-line' should print inline the source lines
+( cd $CURRENT_TEST_DIR ; $CMD_PERF annotate --stdio --dso load -P --print-line > $LOGS_DIR/basic_printline.log 2> $LOGS_DIR/basic_printline.err )
+PERF_EXIT_CODE=$?
+
+FULLPATH="`pwd`/examples"
+../common/check_all_patterns_found.pl "$FULLPATH/load\.c:$RE_NUMBER\s+$REGEX_LINE" "$REGEX_SECTION__TEXT" < $LOGS_DIR/basic_printline.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "print-line"
+(( TEST_RESULT += $? ))
+
+
+### redirected input
+
+# '-i dir/perf.data' should point to some other perf.data file
+mv $CURRENT_TEST_DIR/perf.data $CURRENT_TEST_DIR/examples/
+$CMD_PERF annotate --stdio --dso load -i $CURRENT_TEST_DIR/examples/perf.data > $LOGS_DIR/basic_input.log 2> $LOGS_DIR/basic_input.err
+PERF_EXIT_CODE=$?
+
+# the output should be the same as before
+diff -q $LOGS_DIR/basic_input.log $LOGS_DIR/basic_dso.log
+CHECK_EXIT_CODE=$?
+diff -q $LOGS_DIR/basic_input.err $LOGS_DIR/basic_dso.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "redirected input"
+(( TEST_RESULT += $? ))
+
+
+### execution without perf.data
+
+# check for error message
+! $CMD_PERF annotate > $LOGS_DIR/basic_nodata.log 2> $LOGS_DIR/basic_nodata.err
+PERF_EXIT_CODE=$?
+
+REGEX_NO_DATA="failed to open perf.data: No such file or directory"
+../common/check_all_lines_matched.pl "$REGEX_NO_DATA" < $LOGS_DIR/basic_nodata.err
+CHECK_EXIT_CODE=$?
+test ! -s $LOGS_DIR/basic_nodata.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "execution without data"
+(( TEST_RESULT += $? ))
+mv $CURRENT_TEST_DIR/examples/perf.data $CURRENT_TEST_DIR/
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
* [PATCH 4/9] perf test: new testsuite: perf buildid-list tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (2 preceding siblings ...)
2016-03-16 13:54 ` [PATCH 3/9] perf test: new testsuite: perf annotate tests Michael Petlan
@ 2016-03-16 13:55 ` Michael Petlan
2016-03-16 13:55 ` [PATCH 5/9] perf test: new testsuite: perf list tests Michael Petlan
` (4 subsequent siblings)
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:55 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
This commit adds tests for the perf buildid-list tool. It also serves
as a placeholder for future perf buildid-cache tests.
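The central check (done via common/check_buildids_vs_files.pl from the
earlier patch) compares the two available sources of each build-id;
roughly, in shell terms (the DSO path is only a placeholder):

    perf buildid-list -i perf.data   # prints "<40 hex digits>  /path/to/dso" lines
    file /path/to/dso                # its "BuildID[sha1]=..." must match the listed id
    perf buildid-list -k             # kernel build-id, used for the [kernel.kallsyms] line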
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/testsuite/base_buildid/cleanup.sh | 23 ++++++
tools/perf/testsuite/base_buildid/settings.sh | 45 ++++++++++++
tools/perf/testsuite/base_buildid/setup.sh | 26 +++++++
.../testsuite/base_buildid/test_buildid-list.sh | 81 ++++++++++++++++++++++
4 files changed, 175 insertions(+)
create mode 100755 tools/perf/testsuite/base_buildid/cleanup.sh
create mode 100644 tools/perf/testsuite/base_buildid/settings.sh
create mode 100755 tools/perf/testsuite/base_buildid/setup.sh
create mode 100755 tools/perf/testsuite/base_buildid/test_buildid-list.sh
diff --git a/tools/perf/testsuite/base_buildid/cleanup.sh b/tools/perf/testsuite/base_buildid/cleanup.sh
new file mode 100755
index 0000000..fb90cc3
--- /dev/null
+++ b/tools/perf/testsuite/base_buildid/cleanup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf buildid test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+if [ ! -n "$PERFSUITE_RUN_DIR" ]; then
+ remove_buildid_cache
+ find . -name \*.log | xargs -r rm
+ find . -name \*.err | xargs -r rm
+ test -e perf.data && rm -rf perf.data
+else
+ mv "$BUILDIDDIR" "$PERFSUITE_RUN_DIR/perf_buildid-cache/"
+fi
+
+print_overall_results 0
+exit $?
diff --git a/tools/perf/testsuite/base_buildid/settings.sh b/tools/perf/testsuite/base_buildid/settings.sh
new file mode 100644
index 0000000..9bc0415
--- /dev/null
+++ b/tools/perf/testsuite/base_buildid/settings.sh
@@ -0,0 +1,45 @@
+#
+# settings.sh of perf buildid test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_buildid-cache"
+export MY_ARCH=`arch`
+export MY_HOSTNAME=`hostname`
+export MY_KERNEL_VERSION=`uname -r`
+export MY_CPUS_ONLINE=`nproc`
+export MY_CPUS_AVAILABLE=`cat /proc/cpuinfo | grep -P "processor\s" | wc -l`
+
+export BUILDIDDIR=${BUILDIDDIR:-"$HOME/.debug-`date +%s`"}
+test -d "$BUILDIDDIR" || mkdir "$BUILDIDDIR"
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
+
+
+clear_buildid_cache()
+{
+ rm -rf $BUILDIDDIR/*
+}
+
+remove_buildid_cache()
+{
+ clear_buildid_cache
+ rmdir $BUILDIDDIR
+}
diff --git a/tools/perf/testsuite/base_buildid/setup.sh b/tools/perf/testsuite/base_buildid/setup.sh
new file mode 100755
index 0000000..e9f6d56
--- /dev/null
+++ b/tools/perf/testsuite/base_buildid/setup.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#
+# setup.sh of perf buildid test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# FIXME - maybe the setup is not necessary
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+# clear the cache
+clear_buildid_cache
+
+# record some perf.data
+$CMD_PERF record -o $CURRENT_TEST_DIR/perf.data -a -- $CMD_LONGER_SLEEP &> $LOGS_DIR/setup.log
+
+print_overall_results $?
+exit $?
diff --git a/tools/perf/testsuite/base_buildid/test_buildid-list.sh b/tools/perf/testsuite/base_buildid/test_buildid-list.sh
new file mode 100755
index 0000000..4b40500
--- /dev/null
+++ b/tools/perf/testsuite/base_buildid/test_buildid-list.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+#
+# test_buildid-list of perf buildid test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether the buildids captured by perf record
+# and listed by perf buildid-list from the perf.data file match
+# reality. Some other checks of the tool are performed as well.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+test -s perf.data || ./setup.sh
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF buildid-list --help > $LOGS_DIR/list_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-BUILDID-LIST" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "SEE ALSO" < $LOGS_DIR/list_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "perf\-buildid\-list \- List the buildids in a perf\.data file" < $LOGS_DIR/list_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### buildids check
+
+# test that perf buildid-list is even working
+$CMD_PERF buildid-list -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/list_buildids.log 2> $LOGS_DIR/list_buildids.err
+PERF_EXIT_CODE=$?
+
+# output sanity checks
+REGEX_LINE_BASIC="\w{40}\s+$RE_PATH"
+REGEX_LINE_KALLSYMS="\w{40}\s+\[kernel\.kallsyms\]"
+../common/check_all_lines_matched.pl "$REGEX_LINE_BASIC" "$REGEX_LINE_KALLSYMS" < $LOGS_DIR/list_buildids.log
+CHECK_EXIT_CODE=$?
+test ! -s $LOGS_DIR/list_buildids.err
+(( CHECK_EXIT_CODE += $? ))
+
+# output semantics check
+../common/check_buildids_vs_files.pl < $LOGS_DIR/list_buildids.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "buildids check"
+(( TEST_RESULT += $? ))
+
+
+### kernel buildid
+
+# the --kernel option should print the buildid of the running kernel
+$CMD_PERF buildid-list --kernel > $LOGS_DIR/list_kernel.log
+PERF_EXIT_CODE=$?
+
+# check whether the buildid is printed
+../common/check_all_lines_matched.pl "\w{40}" < $LOGS_DIR/list_kernel.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "kernel buildid"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
* [PATCH 5/9] perf test: new testsuite: perf list tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (3 preceding siblings ...)
2016-03-16 13:55 ` [PATCH 4/9] perf test: new testsuite: perf buildid-list tests Michael Petlan
@ 2016-03-16 13:55 ` Michael Petlan
2016-03-16 13:56 ` [PATCH 6/9] perf test: new testsuite: perf probe tests Michael Petlan
` (3 subsequent siblings)
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:55 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
This commit adds tests for the perf list tool.
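Besides the plain listing, test_basic.sh also checks the filtered
listings, roughly equivalent to running the following by hand
(illustrative commands only; the script drives them via $CMD_PERF and
verifies the output with the common/*.pl checkers):
./perf list hw
./perf list tracepoint
./perf list 'syscalls:*'
It also makes sure that 'perf list somethingnonsense' does not
segfault, which used to happen in 4.4-rc.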
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/testsuite/base_list/cleanup.sh | 22 ++++++
tools/perf/testsuite/base_list/settings.sh | 30 ++++++++
tools/perf/testsuite/base_list/setup.sh | 20 ++++++
tools/perf/testsuite/base_list/test_basic.sh | 101 +++++++++++++++++++++++++++
4 files changed, 173 insertions(+)
create mode 100755 tools/perf/testsuite/base_list/cleanup.sh
create mode 100644 tools/perf/testsuite/base_list/settings.sh
create mode 100755 tools/perf/testsuite/base_list/setup.sh
create mode 100755 tools/perf/testsuite/base_list/test_basic.sh
diff --git a/tools/perf/testsuite/base_list/cleanup.sh b/tools/perf/testsuite/base_list/cleanup.sh
new file mode 100755
index 0000000..d2ef647
--- /dev/null
+++ b/tools/perf/testsuite/base_list/cleanup.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf list test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+find . -name \*.log | xargs -r rm
+find . -name \*.err | xargs -r rm
+
+print_overall_results 0
+exit $?
diff --git a/tools/perf/testsuite/base_list/settings.sh b/tools/perf/testsuite/base_list/settings.sh
new file mode 100644
index 0000000..eef343b
--- /dev/null
+++ b/tools/perf/testsuite/base_list/settings.sh
@@ -0,0 +1,30 @@
+#
+# settings.sh of perf_list test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_list"
+export MY_ARCH=`arch`
+export MY_HOSTNAME=`hostname`
+export MY_KERNEL_VERSION=`uname -r`
+export MY_CPUS_ONLINE=`nproc`
+export MY_CPUS_AVAILABLE=`grep -cP "processor\s" /proc/cpuinfo`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
diff --git a/tools/perf/testsuite/base_list/setup.sh b/tools/perf/testsuite/base_list/setup.sh
new file mode 100755
index 0000000..e254d05
--- /dev/null
+++ b/tools/perf/testsuite/base_list/setup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+#
+# setup.sh of perf list test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# FIXME - maybe the setup is not necessary
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+print_overall_results 0
+exit $?
diff --git a/tools/perf/testsuite/base_list/test_basic.sh b/tools/perf/testsuite/base_list/test_basic.sh
new file mode 100755
index 0000000..7eed587
--- /dev/null
+++ b/tools/perf/testsuite/base_list/test_basic.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+#
+# test_basic of perf_list test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks the basic functionality of the perf list command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF list --help > $LOGS_DIR/basic_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-LIST" "NAME" "SYNOPSIS" "DESCRIPTION" "EVENT MODIFIERS" "RAW HARDWARE" "PARAMETERIZED EVENTS" "OPTIONS" "SEE ALSO" "NOTES" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "perf\-list \- List all symbolic event types" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### basic execution
+
+# test that perf list is even working
+$CMD_PERF list > $LOGS_DIR/basic_basic.log 2> $LOGS_DIR/basic_basic.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINE_HEADER="List of pre-defined events"
+REGEX_LINE_BASIC="\s*$RE_EVENT_ANY\s+(?:OR\s+$RE_EVENT_ANY\s+)?\[.*event.*\]"
+REGEX_LINE_BREAKPOINT="\s*mem:<addr>.*\s+\[Hardware breakpoint\]"
+REGEX_LINE_RAW="\[Raw hardware event descriptor\]"
+REGEX_LINE_AUX="see \'man perf\-list\' on how to encode it"
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "$REGEX_LINE_HEADER" "$REGEX_LINE_BASIC" "$REGEX_LINE_BREAKPOINT" "$REGEX_LINE_RAW" "$REGEX_LINE_AUX" < $LOGS_DIR/basic_basic.log
+CHECK_EXIT_CODE=$?
+test ! -s $LOGS_DIR/basic_basic.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+### listing event groups
+
+# perf list can filter the list by category keywords or event glob patterns
+declare -A outputs
+outputs["hw"]="Hardware event"
+outputs["sw"]="Software event"
+outputs["cache"]="Hardware cache event"
+outputs["tracepoint"]="Tracepoint event"
+outputs["pmu"]="Kernel PMU event"
+outputs["xfs:\*"]="^\s*xfs:"
+outputs["kmem:\*"]="^\s*kmem:"
+outputs["syscalls:\*"]="^\s*syscalls:sys"
+
+for i in ${!outputs[@]}; do
+ j=`echo $i | tr -d '\\\*:'`
+ $CMD_PERF list $i > $LOGS_DIR/basic_$j.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_lines_matched.pl "$REGEX_LINE_HEADER" "$RE_LINE_EMPTY" "${outputs[$i]}" < $LOGS_DIR/basic_$j.log
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "list $i"
+ (( TEST_RESULT += $? ))
+done
+
+
+### non-sense argument
+
+# 'perf list somethingnonsense' caused segfault in 4.4-rc
+$CMD_PERF list somethingnonsense &> $LOGS_DIR/basic_nonsense.log
+PERF_EXIT_CODE=$?
+
+../common/check_no_patterns_found.pl "SIGSEGV" "egmentation fault" < $LOGS_DIR/basic_nonsense.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-sense argument"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
* [PATCH 6/9] perf test: new testsuite: perf probe tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (4 preceding siblings ...)
2016-03-16 13:55 ` [PATCH 5/9] perf test: new testsuite: perf list tests Michael Petlan
@ 2016-03-16 13:56 ` Michael Petlan
2016-03-16 13:56 ` [PATCH 7/9] perf test: new testsuite: perf report tests Michael Petlan
` (2 subsequent siblings)
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:56 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, Jiri Olsa
This commit adds tests for the perf probe tool. Some of the tests are based
on Masami Hiramatsu's perf-probe tests.
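The testcases cover both kernel and userspace probing, including
fetching of function arguments and return values, for example
(illustrative commands only; the scripts run them via $CMD_PERF and
verify the output with the common/*.pl checkers):
./perf probe --add vfs_read
./perf probe -x examples/advanced --add 'isprime a'
./perf probe -x examples/advanced --add 'incr%return $retval'
./perf probe --del '*'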
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/testsuite/base_probe/cleanup.sh | 22 ++
tools/perf/testsuite/base_probe/examples/Makefile | 16 ++
.../perf/testsuite/base_probe/examples/advanced.c | 40 ++++
.../testsuite/base_probe/examples/exact_counts.c | 35 ++++
tools/perf/testsuite/base_probe/examples/test.c | 35 ++++
tools/perf/testsuite/base_probe/settings.sh | 58 ++++++
tools/perf/testsuite/base_probe/setup.sh | 23 +++
.../base_probe/test_adding_blacklisted.sh | 63 ++++++
.../testsuite/base_probe/test_adding_kernel.sh | 229 +++++++++++++++++++++
tools/perf/testsuite/base_probe/test_advanced.sh | 124 +++++++++++
tools/perf/testsuite/base_probe/test_basic.sh | 79 +++++++
.../perf/testsuite/base_probe/test_exact_counts.sh | 106 ++++++++++
.../testsuite/base_probe/test_invalid_options.sh | 80 +++++++
.../testsuite/base_probe/test_line_semantics.sh | 56 +++++
tools/perf/testsuite/base_probe/test_listing.sh | 154 ++++++++++++++
.../perf/testsuite/base_probe/test_probe_syntax.sh | 119 +++++++++++
16 files changed, 1239 insertions(+)
create mode 100755 tools/perf/testsuite/base_probe/cleanup.sh
create mode 100644 tools/perf/testsuite/base_probe/examples/Makefile
create mode 100644 tools/perf/testsuite/base_probe/examples/advanced.c
create mode 100644 tools/perf/testsuite/base_probe/examples/exact_counts.c
create mode 100644 tools/perf/testsuite/base_probe/examples/test.c
create mode 100644 tools/perf/testsuite/base_probe/settings.sh
create mode 100755 tools/perf/testsuite/base_probe/setup.sh
create mode 100755 tools/perf/testsuite/base_probe/test_adding_blacklisted.sh
create mode 100755 tools/perf/testsuite/base_probe/test_adding_kernel.sh
create mode 100755 tools/perf/testsuite/base_probe/test_advanced.sh
create mode 100755 tools/perf/testsuite/base_probe/test_basic.sh
create mode 100755 tools/perf/testsuite/base_probe/test_exact_counts.sh
create mode 100755 tools/perf/testsuite/base_probe/test_invalid_options.sh
create mode 100755 tools/perf/testsuite/base_probe/test_line_semantics.sh
create mode 100755 tools/perf/testsuite/base_probe/test_listing.sh
create mode 100755 tools/perf/testsuite/base_probe/test_probe_syntax.sh
diff --git a/tools/perf/testsuite/base_probe/cleanup.sh b/tools/perf/testsuite/base_probe/cleanup.sh
new file mode 100755
index 0000000..ee5653e
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/cleanup.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+clear_all_probes
+if [ -z "$PERFSUITE_RUN_DIR" ]; then
+ find . -name \*.log | xargs -r rm
+ find . -name \*.err | xargs -r rm
+ make -s -C examples clean
+fi
+
+print_overall_results 0
+exit $?
diff --git a/tools/perf/testsuite/base_probe/examples/Makefile b/tools/perf/testsuite/base_probe/examples/Makefile
new file mode 100644
index 0000000..3248b6c
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/examples/Makefile
@@ -0,0 +1,16 @@
+CC=gcc
+CFLAGS=-g -O0
+
+SRCS = $(wildcard *.c)
+PROGS = $(patsubst %.c,%,$(SRCS))
+
+all: $(PROGS)
+ifneq "$(MAKE_TARGET_DIR)x" 'x'
+ mv $(PROGS) $(MAKE_TARGET_DIR)/
+endif
+
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $<
+
+clean:
+ rm -f $(PROGS)
diff --git a/tools/perf/testsuite/base_probe/examples/advanced.c b/tools/perf/testsuite/base_probe/examples/advanced.c
new file mode 100644
index 0000000..1851e4b
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/examples/advanced.c
@@ -0,0 +1,40 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+static int counter = 0;
+
+int incr(void)
+{
+ int a;
+ a = counter++ * 2;
+ return a;
+}
+
+int isprime(int a)
+{
+ int i;
+ if(a <= 1)
+ return 0;
+ for(i = 2; i <= a / 2; i++)
+ if(!(a % i))
+ return 0;
+ return 1;
+}
+
+int main(int argc, char **argv)
+{
+ int numbers[] = { 2, 3, 4, 5, 6, 7, 13, 17, 19 };
+ int i;
+
+ for(i = 0; i < 9; i++)
+ {
+ printf("%i %s prime\n", numbers[i], (isprime(numbers[i]))? "is" : "is not");
+ }
+
+ for(i = 0; i < 9; i++)
+ {
+ printf("Now the state is %i.\n", incr());
+ }
+
+ return 0;
+}
diff --git a/tools/perf/testsuite/base_probe/examples/exact_counts.c b/tools/perf/testsuite/base_probe/examples/exact_counts.c
new file mode 100644
index 0000000..f5cb8fa
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/examples/exact_counts.c
@@ -0,0 +1,35 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+
+int f_1x(void) { return 1; }
+int f_2x(void) { return 2; }
+int f_3x(void) { return 3; }
+int f_103x(void) { return 103; }
+int f_997x(void) { return 997; }
+int f_65535x(void) { return 65535; }
+
+int main(int argc, char **argv)
+{
+ int i, a;
+
+ for(i = 0; i < 1; i++)
+ a = f_1x();
+
+ for(i = 0; i < 2; i++)
+ a = f_2x();
+
+ for(i = 0; i < 3; i++)
+ a = f_3x();
+
+ for(i = 0; i < 103; i++)
+ a = f_103x();
+
+ for(i = 0; i < 997; i++)
+ a = f_997x();
+
+ for(i = 0; i < 65535; i++)
+ a = f_65535x();
+
+ return 0;
+}
diff --git a/tools/perf/testsuite/base_probe/examples/test.c b/tools/perf/testsuite/base_probe/examples/test.c
new file mode 100644
index 0000000..5972f8e
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/examples/test.c
@@ -0,0 +1,35 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+int some_function_with_a_really_long_name_that_must_be_longer_than_64_bytes(int some_argument_with_a_really_long_name_that_must_be_longer_than_64_bytes)
+{
+ int some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes = 0;
+ int i;
+
+ for(i = 0; i <= some_argument_with_a_really_long_name_that_must_be_longer_than_64_bytes; i++)
+ {
+ some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes += i;
+ }
+
+ return some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes;
+}
+
+int some_normal_function(int a)
+{
+ return a * a * a;
+}
+
+int main(int argc, char **argv)
+{
+ int x = 20, y, z;
+
+ if(argc > 1)
+ x = atoi(argv[1]);
+
+ y = some_function_with_a_really_long_name_that_must_be_longer_than_64_bytes(x);
+ z = some_normal_function(x);
+
+ printf("f1(%i) = %i\nf2(%i) = %i\n", x, y, x, z);
+
+ return 0;
+}
diff --git a/tools/perf/testsuite/base_probe/settings.sh b/tools/perf/testsuite/base_probe/settings.sh
new file mode 100644
index 0000000..8763ede
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/settings.sh
@@ -0,0 +1,58 @@
+#
+# settings.sh of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_probe"
+
+export MY_ARCH=`arch`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
+
+check_kprobes_available()
+{
+ grep -q kprobe_register /proc/kallsyms
+}
+
+check_uprobes_available()
+{
+ grep -q uprobe_register /proc/kallsyms
+}
+
+clear_all_probes()
+{
+ echo 0 > /sys/kernel/debug/tracing/events/enable
+ check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
+ check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
+}
+
+# FIXME
+check_perf_probe_option()
+{ #option
+ $PERF probe -h 2>&1 | egrep '[\t ]+'$1'[\t ]+' > /dev/null
+}
+
+#FIXME
+check_kernel_debuginfo()
+{
+ eu-addr2line -k 0x`grep -m 1 vfs_read /proc/kallsyms | cut -f 1 -d" "` | grep vfs_read
+}
diff --git a/tools/perf/testsuite/base_probe/setup.sh b/tools/perf/testsuite/base_probe/setup.sh
new file mode 100755
index 0000000..8f32bc2
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+#
+# setup.sh of perf probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# We need to clean up all the previously added probes
+# FIXME
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+make -s -C examples
+
+print_results $? 0 "building examples"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_adding_blacklisted.sh b/tools/perf/testsuite/base_probe/test_adding_blacklisted.sh
new file mode 100755
index 0000000..b79c238
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_adding_blacklisted.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#
+# test_adding_blacklisted of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# Blacklisted functions should not be added successfully as probes;
+# they must be skipped.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+# skip if not supported
+BLACKFUNC=`head -n 1 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
+if [ -z "$BLACKFUNC" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+# remove all previously added probes
+clear_all_probes
+
+
+### adding blacklisted function
+
+# functions from blacklist should be skipped by perf probe
+! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
+PERF_EXIT_CODE=$?
+
+REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
+REGEX_NOT_FOUND_MESSAGE="Probe point \'$BLACKFUNC\' not found."
+REGEX_ERROR_MESSAGE="Error: Failed to add events."
+../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" < $LOGS_DIR/adding_blacklisted.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
+(( TEST_RESULT += $? ))
+
+
+### listing not-added probe
+
+# blacklisted probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_blacklisted_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" < $LOGS_DIR/adding_blacklisted_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing blacklisted probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_adding_kernel.sh b/tools/perf/testsuite/base_probe/test_adding_kernel.sh
new file mode 100755
index 0000000..34f0109
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_adding_kernel.sh
@@ -0,0 +1,229 @@
+#!/bin/bash
+
+#
+# test_adding_kernel of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test covers adding probes, listing them correctly,
+# and removing them.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+TEST_PROBE="vfs_read"
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### basic probe adding
+
+for opt in "" "-a" "--add"; do
+ clear_all_probes
+ $CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Added new event:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### listing added probe :: perf list
+
+# any added probes should appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:$TEST_PROBE\s+\[Tracepoint event\]" < $LOGS_DIR/adding_kernel_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list"
+(( TEST_RESULT += $? ))
+
+
+### listing added probe :: perf probe -l
+
+# '-l' should list all the added probes as well
+$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\s*probe:$TEST_PROBE\s+\(on $TEST_PROBE@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+(( TEST_RESULT += $? ))
+
+
+### using added probe
+
+$CMD_PERF stat -e probe:$TEST_PROBE -o $LOGS_DIR/adding_kernel_using_probe.log -- cat /proc/uptime > /dev/null
+PERF_EXIT_CODE=$?
+
+REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':"
+# the value should be greater than 1
+REGEX_STAT_VALUES="\s*[1-9][0-9]*\s+probe:$TEST_PROBE"
+REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds time elapsed"
+../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
+(( TEST_RESULT += $? ))
+
+
+### removing added probe
+
+# '-d' should remove the probe
+$CMD_PERF probe -d $TEST_PROBE 2> $LOGS_DIR/adding_kernel_removing.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
+(( TEST_RESULT += $? ))
+
+
+### listing removed probe
+
+# removed probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" < $LOGS_DIR/adding_kernel_list_removed.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+### dry run
+
+# the '-n' switch should run it in dry mode
+$CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err
+PERF_EXIT_CODE=$?
+
+# check for the output (should be the same as usual)
+../common/check_all_patterns_found.pl "Added new event:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err
+CHECK_EXIT_CODE=$?
+
+# check that no probe was actually added
+! ( $CMD_PERF probe -l | grep "probe:$TEST_PROBE" )
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe"
+(( TEST_RESULT += $? ))
+
+
+### force-adding probes
+
+# when using '--force' a probe should be added even if it is already there
+$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new event:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding"
+(( TEST_RESULT += $? ))
+
+# adding existing probe without '--force' should fail
+! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)"
+(( TEST_RESULT += $? ))
+
+# adding existing probe with '--force' should pass
+$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new event:" "probe:${TEST_PROBE}_1" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)"
+(( TEST_RESULT += $? ))
+
+
+### using doubled probe
+
+# since they are the same, they should produce the same results
+$CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_1 -x';' -o $LOGS_DIR/adding_kernel_using_two.log -- bash -c 'cat /proc/cpuinfo > /dev/null'
+PERF_EXIT_CODE=$?
+
+REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?1?;$RE_NUMBER;$RE_NUMBER"
+../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log
+CHECK_EXIT_CODE=$?
+
+VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+VALUE_2=`grep "${TEST_PROBE}_1;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+
+test $VALUE_1 -eq $VALUE_2
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
+
+
+### removing multiple probes
+
+# using wildcards should remove all matching probes
+$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
+(( TEST_RESULT += $? ))
+
+
+### wildcard adding support
+
+$CMD_PERF probe -nf -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+(( TEST_RESULT += $? ))
+
+
+### out-of-text functions
+
+# out-of-text functions should be skipped
+INITTEXT=init_setup
+grep -q " $INITTEXT" /proc/kallsyms
+if [ $? -eq 0 ] && [ "$MY_ARCH" = "x86_64" ]; then
+ ! $CMD_PERF probe $INITTEXT 2> $LOGS_DIR/adding_kernel_outoftext.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "init_setup is out of \.text, skip it" < $LOGS_DIR/adding_kernel_outoftext.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "out-of-text functions"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "out-of-text functions"
+fi
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_advanced.sh b/tools/perf/testsuite/base_probe/test_advanced.sh
new file mode 100755
index 0000000..9f2e5f1
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_advanced.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+#
+# test_advanced.sh of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This testcase tries some more advanced probes, capturing
+# values of variables, registers, etc. The perf-script tool is
+# used for processing the results.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_uprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+# clean up before we start
+clear_all_probes
+find . -name perf.data\* | xargs -r rm
+
+
+### function argument probing :: add
+
+# we want to trace values of the variable (argument) 'a' along with the function calls
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/advanced --add 'isprime a' > $LOGS_DIR/advanced_funcargs_add.log 2>&1
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe_advanced:isprime" < $LOGS_DIR/advanced_funcargs_add.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: add"
+(( TEST_RESULT += $? ))
+
+
+### function argument probing :: use
+
+# perf record should catch samples including the argument's value
+$CMD_PERF record -e 'probe_advanced:isprime' -o $CURRENT_TEST_DIR/perf.data $CURRENT_TEST_DIR/examples/advanced > /dev/null 2> $LOGS_DIR/advanced_funcargs_record.log
+PERF_EXIT_CODE=$?
+
+# perf record should catch exactly 9 samples
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" "9 samples" < $LOGS_DIR/advanced_funcargs_record.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: record"
+(( TEST_RESULT += $? ))
+
+# perf script should report the function calls with the correct arg values
+$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/advanced_funcargs_script.log
+PERF_EXIT_CODE=$?
+
+# checking for the perf script output sanity
+REGEX_SCRIPT_LINE="\s*advanced\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe_advanced:isprime:\s+\($RE_NUMBER\) a=$RE_NUMBER"
+../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/advanced_funcargs_script.log
+CHECK_EXIT_CODE=$?
+
+# checking whether the values are really correct
+../common/check_exact_pattern_order.pl "a=2" "a=3" "a=4" "a=5" "a=6" "a=7" "a=13" "a=17" "a=19" < $LOGS_DIR/advanced_funcargs_script.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: script"
+(( TEST_RESULT += $? ))
+
+
+### function retval probing :: add
+
+# we want to trace return values of the function along with the function calls
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/advanced --add 'incr%return $retval' > $LOGS_DIR/advanced_funcretval_add.log 2>&1
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe_advanced:incr" < $LOGS_DIR/advanced_funcretval_add.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function retval probing :: add"
+(( TEST_RESULT += $? ))
+
+
+### function retval probing :: use
+
+# perf record should catch samples including the function return value
+$CMD_PERF record -e 'probe_advanced:incr' -o $CURRENT_TEST_DIR/perf.data $CURRENT_TEST_DIR/examples/advanced > /dev/null 2> $LOGS_DIR/advanced_funcretval_record.log
+PERF_EXIT_CODE=$?
+
+# perf record should catch exactly 9 samples
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" "9 samples" < $LOGS_DIR/advanced_funcretval_record.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function retval probing :: record"
+(( TEST_RESULT += $? ))
+
+# perf script should report the function calls with the correct return values
+$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/advanced_funcretval_script.log
+PERF_EXIT_CODE=$?
+
+# checking for the perf script output sanity
+REGEX_SCRIPT_LINE="\s*advanced\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe_advanced:incr:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\) arg1=0x$RE_NUMBER_HEX"
+../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/advanced_funcretval_script.log
+CHECK_EXIT_CODE=$?
+
+# checking whether the values are really correct
+../common/check_exact_pattern_order.pl "arg1=0x0" "arg1=0x2" "arg1=0x4" "arg1=0x6" "arg1=0x8" "arg1=0xa" "arg1=0xc" "arg1=0xe" "arg1=0x10" < $LOGS_DIR/advanced_funcretval_script.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function retval probing :: script"
+(( TEST_RESULT += $? ))
+
+
+clear_all_probes
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_basic.sh b/tools/perf/testsuite/base_probe/test_basic.sh
new file mode 100755
index 0000000..8f6ea13
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_basic.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+#
+# test_basic of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Description:
+#
+# This test checks the basic functionality of the perf probe command.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF probe --help > $LOGS_DIR/basic_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-PROBE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "PROBE\s+SYNTAX" "PROBE\s+ARGUMENT" "LINE\s+SYNTAX" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "LAZY\s+MATCHING" "FILTER\s+PATTERN" "EXAMPLES" "SEE\s+ALSO" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "vmlinux" "module=" "source=" "verbose" "quiet" "add=" "del=" "list.*EVENT" "line=" "vars=" "externs" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "no-inlines" "funcs.*FILTER" "filter=FILTER" "force" "dry-run" "max-probes" "exec=" "demangle-kernel" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### usage message
+
+# without any args perf-probe should print usage
+$CMD_PERF probe 2> $LOGS_DIR/basic_usage.log > /dev/null
+
+../common/check_all_patterns_found.pl "[Uu]sage" "perf probe" "verbose" "quiet" "add" "del" "force" "line" "vars" "externs" "range" < $LOGS_DIR/basic_usage.log
+CHECK_EXIT_CODE=$?
+
+print_results 0 $CHECK_EXIT_CODE "usage message"
+(( TEST_RESULT += $? ))
+
+
+### quiet switch
+
+# '--quiet' should mute all output
+$CMD_PERF probe --quiet --add vfs_read > $LOGS_DIR/basic_quiet01.log 2> $LOGS_DIR/basic_quiet01.err
+PERF_EXIT_CODE=$?
+$CMD_PERF probe --quiet --del vfs_read > $LOGS_DIR/basic_quiet03.log 2> $LOGS_DIR/basic_quiet02.err
+(( PERF_EXIT_CODE += $? ))
+
+test `cat $LOGS_DIR/basic_quiet*log $LOGS_DIR/basic_quiet*err | wc -l` -eq 0
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "quiet switch"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_exact_counts.sh b/tools/perf/testsuite/base_probe/test_exact_counts.sh
new file mode 100755
index 0000000..2b8aa63
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_exact_counts.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+#
+# test_exact_counts of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This testcase checks whether perf stat catches all
+# the probes with exactly known function call counts.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_uprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+# clean up before we start
+clear_all_probes
+find . -name perf.data\* | xargs -r rm
+
+
+### adding userspace probes
+
+PERF_EXIT_CODE=0
+test -e $LOGS_DIR/exact_counts_add.log && rm -f $LOGS_DIR/exact_counts_add.log
+for i in 1 2 3 103 997 65535; do
+ $CMD_PERF probe -x $CURRENT_TEST_DIR/examples/exact_counts --add f_${i}x >> $LOGS_DIR/exact_counts_add.log 2>&1
+ (( PERF_EXIT_CODE += $? ))
+done
+
+../common/check_all_patterns_found.pl "probe_exact:f_1x" "probe_exact:f_2x" "probe_exact:f_3x" "probe_exact:f_103x" \
+ "probe_exact:f_997x" "probe_exact:f_65535x" < $LOGS_DIR/exact_counts_add.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding userspace probes"
+(( TEST_RESULT += $? ))
+
+
+### listing added probes
+
+$CMD_PERF probe -l > $LOGS_DIR/exact_counts_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe_exact:f_1x" "probe_exact:f_2x" "probe_exact:f_3x" "probe_exact:f_103x" \
+ "probe_exact:f_997x" "probe_exact:f_65535x" < $LOGS_DIR/exact_counts_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probes"
+(( TEST_RESULT += $? ))
+
+
+### using probes :: perf stat
+
+# perf stat should catch all the events and give exact results
+$CMD_PERF stat -x';' -e 'probe_exact:*' $CURRENT_TEST_DIR/examples/exact_counts 2> $LOGS_DIR/exact_counts_stat.log
+PERF_EXIT_CODE=$?
+
+# check for exact values in perf stat results
+../common/check_all_lines_matched.pl "(\d+);+probe_exact:f_\1x" < $LOGS_DIR/exact_counts_stat.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using probes :: perf stat"
+(( TEST_RESULT += $? ))
+
+
+### using probes :: perf record
+
+# perf record should catch all the samples as well
+$CMD_PERF record -e 'probe_exact:*' -o $CURRENT_TEST_DIR/perf.data $CURRENT_TEST_DIR/examples/exact_counts 2> $LOGS_DIR/exact_counts_record.log
+PERF_EXIT_CODE=$?
+
+# perf record should catch exactly 66641 samples
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" "66641 samples" < $LOGS_DIR/exact_counts_record.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using probes :: perf record"
+(( TEST_RESULT += $? ))
+
+# perf report should report exact values too
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data -n > $LOGS_DIR/exact_counts_report.log
+PERF_EXIT_CODE=$?
+
+# perf report should report exact sample counts
+../common/check_all_lines_matched.pl "\s*100.00%\s+(\d+)\s+exact_counts\s+exact_counts\s+\[\.\]\s+f_\1x" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/exact_counts_report.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using probes :: perf report"
+(( TEST_RESULT += $? ))
+
+
+clear_all_probes
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_invalid_options.sh b/tools/perf/testsuite/base_probe/test_invalid_options.sh
new file mode 100755
index 0000000..2b83062
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_invalid_options.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+#
+# test_invalid_options of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether the invalid and incompatible options are reported
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### missing argument
+
+# some options require an argument
+for opt in '-a' '-d' '-L' '-V'; do
+ ! $CMD_PERF probe $opt 2> $LOGS_DIR/invalid_options_missing_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .* requires a value" < $LOGS_DIR/invalid_options_missing_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "missing argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unnecessary argument
+
+# some options do not require an argument
+for opt in '-F' '-l'; do
+ $CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ test ! -s $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "unnecessary argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### mutually exclusive options
+
+# some options are mutually exclusive
+test -e $LOGS_DIR/invalid_options_mutually_exclusive.log && rm -f $LOGS_DIR/invalid_options_mutually_exclusive.log
+for opt in '-a xxx -d xxx' '-a xxx -L foo' '-a xxx -V foo' '-a xxx -l' '-a xxx -F' \
+ '-d xxx -L foo' '-d xxx -V foo' '-d xxx -l' '-d xxx -F' \
+ '-L foo -V bar' '-L foo -l' '-L foo -F' '-V foo -l' '-V foo -F' '-l -F'; do
+ ! $CMD_PERF probe $opt 2> $LOGS_DIR/aux.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .+ cannot be used with switch .+" < $LOGS_DIR/aux.log
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "mutually exclusive options :: $opt"
+ (( TEST_RESULT += $? ))
+
+ # gather the logs
+ grep "Error" $LOGS_DIR/aux.log >> $LOGS_DIR/invalid_options_mutually_exclusive.log
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_line_semantics.sh b/tools/perf/testsuite/base_probe/test_line_semantics.sh
new file mode 100755
index 0000000..edda024
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_line_semantics.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+#
+# test_line_semantics of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether semantic errors in the '--line' option's
+# arguments are properly reported.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### acceptable --line descriptions
+
+# testing acceptance of valid patterns for the '--line' option
+VALID_PATTERNS="func func:10 func:0-10 func:2+10 func@source.c func@source.c:1 source.c:1 source.c:1+1 source.c:1-10"
+for desc in $VALID_PATTERNS; do
+ ! ( $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error" )
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "acceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unacceptable --line descriptions
+
+# testing handling of invalid patterns for the '--line' option
+INVALID_PATTERNS="func:foo func:1-foo func:1+foo func;lazy\*pattern"
+for desc in $INVALID_PATTERNS; do
+ $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error"
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "unacceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_listing.sh b/tools/perf/testsuite/base_probe/test_listing.sh
new file mode 100755
index 0000000..51cd471
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_listing.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+
+#
+# test_listing of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Description:
+#
+# This test exercises the various listing modes of the perf-probe command
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+check_uprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### kernel functions list
+
+# the '-F' option should list all the available kernel functions for probing
+$CMD_PERF probe -F > $LOGS_DIR/listing_kernel_functions.log
+PERF_EXIT_CODE=$?
+
+RATE=`../common/check_kallsyms_vs_probes.pl /proc/kallsyms $LOGS_DIR/listing_kernel_functions.log`
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "kernel functions list ($RATE to kallsyms)"
+(( TEST_RESULT += $? ))
+
+
+### userspace functions list
+
+# with '-x binary' the '-F' option should inspect the binary instead of the kernel
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/exact_counts -F > $LOGS_DIR/listing_userspace_functions.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "f_103x" "f_1x" "f_2x" "f_3x" "f_65535x" "f_997x" "main" < $LOGS_DIR/listing_userspace_functions.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "userspace functions list"
+(( TEST_RESULT += $? ))
+
+
+### kernel variables list
+
+# the '-V' option should list all the available variables for a function/line
+$CMD_PERF probe -V vfs_read > $LOGS_DIR/listing_kernel_variables.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Available variables at vfs_read" "char\s*\*\s*buf" "pos" "size_t\s+count" "struct\s+file\s*\*\s*file" < $LOGS_DIR/listing_kernel_variables.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "kernel variables list"
+(( TEST_RESULT += $? ))
+
+
+### userspace variables list
+
+# with '-x binary' the '-V' option should inspect the binary for variables available in a function
+LONG_FUNC="some_function_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/test -V $LONG_FUNC > $LOGS_DIR/listing_userspace_variables.log
+PERF_EXIT_CODE=$?
+
+LONG_VAR="some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+LONG_ARG="some_argument_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+../common/check_all_patterns_found.pl "Available variables at $LONG_FUNC" "int\s+i" "int\s+$LONG_VAR" "int\s+$LONG_ARG" < $LOGS_DIR/listing_userspace_variables.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "userspace variables list"
+(( TEST_RESULT += $? ))
+
+
+### kernel lines list
+
+# the '-L' option should list all the available lines suitable for probing per function
+$CMD_PERF probe -L vfs_read > $LOGS_DIR/listing_kernel_lines.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\d+\s+\{" "\d+\s+\}" "0\s+ssize_t\svfs_read" "\d+\s+\w+" < $LOGS_DIR/listing_kernel_lines.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "kernel lines list"
+(( TEST_RESULT += $? ))
+
+
+### kernel source lines list
+
+# the '-L' option should list all the available lines suitable for probing per file
+$CMD_PERF probe -L fs/read_write.c > $LOGS_DIR/listing_kernel_source_lines.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "linux/fs/read_write.c" "\d+\s+\{" "\d+\s+\}" "\d+\s+\w+" "\d+\s+.*vfs_read" "\d+\s+.*vfs_write" "Linus Torvalds" < $LOGS_DIR/listing_kernel_source_lines.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "kernel source lines list"
+(( TEST_RESULT += $? ))
+
+
+### userspace lines list --> currently NOT SUPPORTED
+if false; then
+
+# with '-x binary' the '-L' option should search for lines suitable for probing in the binary
+LONG_FUNC="some_function_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/test -L $LONG_FUNC > $LOGS_DIR/listing_userspace_lines.log
+PERF_EXIT_CODE=$?
+
+LONG_VAR="some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+../common/check_all_patterns_found.pl "\d+\s+$LONG_VAR \+= 1;" < $LOGS_DIR/listing_userspace_lines.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "userspace lines list"
+(( TEST_RESULT += $? ))
+else
+print_testcase_skipped "userspace lines list"
+fi
+
+
+### userspace source lines list --> currently NOT SUPPORTED
+if false; then
+
+# the '-L' option should be able to list whole source file as well
+LONG_FUNC="some_function_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+$CMD_PERF probe -x $CURRENT_TEST_DIR/examples/test -L $CURRENT_TEST_DIR/examples/test.c > $LOGS_DIR/listing_userspace_source_lines.log
+PERF_EXIT_CODE=$?
+
+LONG_VAR="some_variable_with_a_really_long_name_that_must_be_longer_than_64_bytes"
+../common/check_all_patterns_found.pl "\d+\s+$LONG_VAR \+= 1;" < $LOGS_DIR/listing_userspace_source_lines.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "userspace source lines list"
+(( TEST_RESULT += $? ))
+else
+print_testcase_skipped "userspace source lines list"
+fi
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_probe/test_probe_syntax.sh b/tools/perf/testsuite/base_probe/test_probe_syntax.sh
new file mode 100755
index 0000000..ccdc752
--- /dev/null
+++ b/tools/perf/testsuite/base_probe/test_probe_syntax.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+#
+# test_probe_syntax of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test covers adding probes specified by more advanced
+# expressions (see the man page section "PROBE SYNTAX"):
+#
+# Probe points are defined by following syntax.
+#
+# 1) Define event based on function name
+# [EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
+#
+# 2) Define event based on source file with line number
+# [EVENT=]SRC:ALN [ARG ...]
+#
+# 3) Define event based on source file with lazy pattern
+# [EVENT=]SRC;PTN [ARG ...]
+#
+#
+# This testcase checks whether the above-mentioned
+# expression formats are accepted correctly by perf-probe.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+TEST_PROBE="vfs_read"
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+clear_all_probes
+
+
+### custom named probe
+
+# when "new_name=" prefix is given, the probe should be named according to it
+$CMD_PERF probe myprobe=$TEST_PROBE 2> $LOGS_DIR/probe_syntax_custom_name_add.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new event:" "probe:myprobe" "on $TEST_PROBE" < $LOGS_DIR/probe_syntax_custom_name_add.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "custom named probe :: add"
+(( TEST_RESULT += $? ))
+
+# the custom name should appear in the probe list
+$CMD_PERF probe -l > $LOGS_DIR/probe_syntax_custom_name_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\s*probe:myprobe\s+\(on $TEST_PROBE@.+\)" < $LOGS_DIR/probe_syntax_custom_name_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "custom named probe :: list"
+(( TEST_RESULT += $? ))
+
+# the custom named probe should be usable
+$CMD_PERF stat -e probe:myprobe -o $LOGS_DIR/probe_syntax_custom_name_use.log -- cat /proc/uptime > /dev/null
+PERF_EXIT_CODE=$?
+
+REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':"
+# the value should be greater than 1
+REGEX_STAT_VALUES="\s*[1-9][0-9]*\s+probe:myprobe"
+REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds time elapsed"
+../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/probe_syntax_custom_name_use.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "custom named probe :: use"
+(( TEST_RESULT += $? ))
+
+clear_all_probes
+
+
+### various syntax forms
+
+# the probe point can be specified many ways
+VALID_PATTERNS_BY_FUNCTION="vfs_read@fs/read_write.c vfs_read:11@fs/read_write.c vfs_read@fs/read_write.c:11 vfs_read%return"
+for desc in $VALID_PATTERNS_BY_FUNCTION; do
+ ! ( $CMD_PERF probe -f --add $desc 2>&1 | grep -q "Invalid argument" )
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "various syntax forms :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+clear_all_probes
+
+# the 'test.c:29' format is better to test with userspace probes,
+# since the absolute line numbers in the code do not change
+! ( $CMD_PERF probe -x $CURRENT_TEST_DIR/examples/test --add test.c:29 2>&1 | grep -q "Invalid argument" )
+CHECK_EXIT_CODE=$?
+
+print_results 0 $CHECK_EXIT_CODE "various syntax forms :: test.c:29"
+(( TEST_RESULT += $? ))
+
+# function name with retval in the userspace code
+! ( $CMD_PERF probe -x $CURRENT_TEST_DIR/examples/test --add 'some_normal_function%return $retval' 2>&1 | grep -q "Invalid argument" )
+CHECK_EXIT_CODE=$?
+
+print_results 0 $CHECK_EXIT_CODE "various syntax forms :: func%return \$retval"
+(( TEST_RESULT += $? ))
+
+clear_all_probes
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
* [PATCH 7/9] perf test: new testsuite: perf report tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (5 preceding siblings ...)
2016-03-16 13:56 ` [PATCH 6/9] perf test: new testsuite: perf probe tests Michael Petlan
@ 2016-03-16 13:56 ` Michael Petlan
2016-03-16 13:57 ` [PATCH 8/9] perf test: new testsuite: perf stat tests Michael Petlan
2016-03-16 13:57 ` [PATCH 9/9] perf test: new testsuite: perf trace tests Michael Petlan
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:56 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, jolsa
This commit adds tests for the perf report tool.
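The setup step records a system-wide profile with call-graph data
(perf record -asdg -- $CMD_LONGER_SLEEP), which test_basic.sh then
examines. A roughly equivalent manual run would be (illustrative only;
$CMD_LONGER_SLEEP is presumably a longer sleep defined in the common
files):
./perf record -asdg -o perf.data -- sleep 10
./perf report --stdio -i perf.data
Harmless stderr output such as the "no symbols found" message is
whitelisted via stderr-whitelist.txt.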
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
tools/perf/testsuite/base_report/cleanup.sh | 24 +++
tools/perf/testsuite/base_report/settings.sh | 30 ++++
tools/perf/testsuite/base_report/setup.sh | 26 ++++
.../testsuite/base_report/stderr-whitelist.txt | 1 +
tools/perf/testsuite/base_report/test_basic.sh | 169 +++++++++++++++++++++
5 files changed, 250 insertions(+)
create mode 100755 tools/perf/testsuite/base_report/cleanup.sh
create mode 100644 tools/perf/testsuite/base_report/settings.sh
create mode 100755 tools/perf/testsuite/base_report/setup.sh
create mode 100644 tools/perf/testsuite/base_report/stderr-whitelist.txt
create mode 100755 tools/perf/testsuite/base_report/test_basic.sh
diff --git a/tools/perf/testsuite/base_report/cleanup.sh b/tools/perf/testsuite/base_report/cleanup.sh
new file mode 100755
index 0000000..50e72e1
--- /dev/null
+++ b/tools/perf/testsuite/base_report/cleanup.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+find . -name \*.log | xargs -r rm
+find . -name \*.err | xargs -r rm
+rm -f perf.data*
+RM_EXIT_CODE=$?
+
+print_overall_results $RM_EXIT_CODE
+exit $?
diff --git a/tools/perf/testsuite/base_report/settings.sh b/tools/perf/testsuite/base_report/settings.sh
new file mode 100644
index 0000000..fd6e10c
--- /dev/null
+++ b/tools/perf/testsuite/base_report/settings.sh
@@ -0,0 +1,30 @@
+#
+# settings.sh of perf_report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_report"
+export MY_ARCH=`arch`
+export MY_HOSTNAME=`hostname`
+export MY_KERNEL_VERSION=`uname -r`
+export MY_CPUS_ONLINE=`nproc`
+export MY_CPUS_AVAILABLE=`grep -Pc "processor\s" /proc/cpuinfo`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
diff --git a/tools/perf/testsuite/base_report/setup.sh b/tools/perf/testsuite/base_report/setup.sh
new file mode 100755
index 0000000..82f06dd
--- /dev/null
+++ b/tools/perf/testsuite/base_report/setup.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#
+# setup.sh of perf report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# We need some sample data for perf-report testing
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+$CMD_PERF record -asdg -o $CURRENT_TEST_DIR/perf.data -- $CMD_LONGER_SLEEP 2> $LOGS_DIR/setup.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
+exit $?
diff --git a/tools/perf/testsuite/base_report/stderr-whitelist.txt b/tools/perf/testsuite/base_report/stderr-whitelist.txt
new file mode 100644
index 0000000..d59b483
--- /dev/null
+++ b/tools/perf/testsuite/base_report/stderr-whitelist.txt
@@ -0,0 +1 @@
+no symbols found in .*, maybe install a debug package
diff --git a/tools/perf/testsuite/base_report/test_basic.sh b/tools/perf/testsuite/base_report/test_basic.sh
new file mode 100755
index 0000000..77ce3cf
--- /dev/null
+++ b/tools/perf/testsuite/base_report/test_basic.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+
+#
+# test_basic of perf_report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests basic functionality of the perf report command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF report --help > $LOGS_DIR/basic_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-REPORT" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "OVERHEAD\s+CALCULATION" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "input" "verbose" "show-nr-samples" "showcpuutilization" "threads" "comms" "pid" "tid" "dsos" "symbols" "symbol-filter" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "hide-unresolved" "sort" "fields" "parent" "exclude-other" "column-widths" "field-separator" "dump-raw-trace" "children" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "call-graph" "max-stack" "inverted" "ignore-callees" "pretty" "stdio" "tui" "gtk" "vmlinux" "kallsyms" "modules" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "force" "symfs" "cpu" "disassembler-style" "source" "asm-raw" "show-total-period" "show-info" "branch-stack" "group" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "branch-history" "objdump" "demangle" "percent-limit" "percentage" "header" "itrace" "full-source-path" "show-ref-call-graph" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### basic execution
+
+# test that perf report is even working
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio > $LOGS_DIR/basic_basic.log 2> $LOGS_DIR/basic_basic.err
+PERF_EXIT_CODE=$?
+
+REGEX_LOST_SAMPLES_INFO="#\s*Total Lost Samples:\s+$RE_NUMBER"
+REGEX_SAMPLES_INFO="#\s*Samples:\s+(?:$RE_NUMBER)\w?\s+of\s+event\s+'$RE_EVENT_ANY'"
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.vmlinux\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LOST_SAMPLES_INFO" "$REGEX_SAMPLES_INFO" "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_basic.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_basic.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+### number of samples
+
+# '--show-nr-samples' should show number of samples for each symbol
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --show-nr-samples > $LOGS_DIR/basic_nrsamples.log 2> $LOGS_DIR/basic_nrsamples.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Samples\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER\s+\S+\s+\[kernel\.vmlinux\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_nrsamples.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_nrsamples.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "number of samples"
+(( TEST_RESULT += $? ))
+
+
+### header
+
+# '--header' and '--header-only' should show perf report header
+( cd $CURRENT_TEST_DIR ; $CMD_PERF report --stdio --header-only > $LOGS_DIR/basic_header.log )
+PERF_EXIT_CODE=$?
+
+REGEX_LINE_HOSTNAME="#\s+hostname\s*:\s*$MY_HOSTNAME"
+REGEX_LINE_KERNEL="#\s+os release\s*:\s*$MY_KERNEL_VERSION"
+REGEX_LINE_PERF="#\s+perf version\s*:\s*"
+REGEX_LINE_ARCH="#\s+arch\s*:\s*$MY_ARCH"
+REGEX_LINE_CPUS_ONLINE="#\s+nrcpus online\s*:\s*$MY_CPUS_ONLINE"
+REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$MY_CPUS_AVAILABLE"
+../common/check_all_patterns_found.pl "$REGEX_LINE_HOSTNAME" "$REGEX_LINE_KERNEL" "$REGEX_LINE_PERF" "$REGEX_LINE_ARCH" "$REGEX_LINE_CPUS_ONLINE" "$REGEX_LINE_CPUS_AVAIL" < $LOGS_DIR/basic_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header"
+(( TEST_RESULT += $? ))
+
+
+### show CPU utilization
+
+# '--showcpuutilization' should show percentage for both system and userspace mode
+( cd $CURRENT_TEST_DIR ; $CMD_PERF report --stdio --showcpuutilization > $LOGS_DIR/basic_cpuut.log 2> $LOGS_DIR/basic_cpuut.err )
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+sys\s+usr\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.vmlinux\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_cpuut.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_cpuut.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "show CPU utilization"
+(( TEST_RESULT += $? ))
+
+
+### pid
+
+# '--pid=' should limit the output for a process with the given pid only
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --pid=1 > $LOGS_DIR/basic_pid.log 2> $LOGS_DIR/basic_pid.err
+PERF_EXIT_CODE=$?
+
+grep -P '\s+[\d\.]+%' $LOGS_DIR/basic_pid.log | ../common/check_all_lines_matched.pl "systemd|init"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_pid.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "pid"
+(( TEST_RESULT += $? ))
+
+
+### non-existing symbol
+
+# '--symbols' should show only the given symbols
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbols=dummynonexistingsymbol > $LOGS_DIR/basic_symbols.log 2> $LOGS_DIR/basic_symbols.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/basic_symbols.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbols.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing symbol"
+(( TEST_RESULT += $? ))
+
+
+### symbol filter
+
+# '--symbol-filter' should filter symbols based on substrings
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbol-filter=map > $LOGS_DIR/basic_symbolfilter.log 2> $LOGS_DIR/basic_symbolfilter.err
+PERF_EXIT_CODE=$?
+
+grep -P '\s+[\d\.]+%' $LOGS_DIR/basic_symbolfilter.log | ../common/check_all_lines_matched.pl "\[[k\.]\]\s+.*map"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbolfilter.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
+(( TEST_RESULT += $? ))
+
+
+# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH 8/9] perf test: new testsuite: perf stat tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (6 preceding siblings ...)
2016-03-16 13:56 ` [PATCH 7/9] perf test: new testsuite: perf report tests Michael Petlan
@ 2016-03-16 13:57 ` Michael Petlan
2016-03-16 13:57 ` [PATCH 9/9] perf test: new testsuite: perf trace tests Michael Petlan
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:57 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, jolsa
This commit adds tests for the perf stat tool and event checking.
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
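Note: as a hand-run illustration of what the CSV check in test_basic.sh
below expects (plain 'perf' stands in for $CMD_PERF and grep -P for the
common/*.pl helpers; the exact command is a sketch, not part of the tests):

  # counter lines of the -x';' output look like '<value>;...;<event>',
  # or start with '<not supported>' for events the PMU does not provide
  perf stat -x';' -a -o /dev/stdout -- sleep 0.1 | grep -P '^([0-9.]+|<not supported>);'

The per-event scripts (test_hw.sh, test_hwcache.sh, ...) run the same kind
of command once per listed event and grep the resulting log.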
tools/perf/testsuite/base_stat/cleanup.sh | 26 ++++++++
tools/perf/testsuite/base_stat/settings.sh | 25 ++++++++
tools/perf/testsuite/base_stat/test_basic.sh | 69 ++++++++++++++++++++++
tools/perf/testsuite/base_stat/test_hw.sh | 50 ++++++++++++++++
tools/perf/testsuite/base_stat/test_hwcache.sh | 51 ++++++++++++++++
.../perf/testsuite/base_stat/test_intel_uncore.sh | 46 +++++++++++++++
.../testsuite/base_stat/test_powerpc_hv_24x7.sh | 60 +++++++++++++++++++
.../base_stat/test_tracepoints_definition.sh | 62 +++++++++++++++++++
8 files changed, 389 insertions(+)
create mode 100755 tools/perf/testsuite/base_stat/cleanup.sh
create mode 100644 tools/perf/testsuite/base_stat/settings.sh
create mode 100755 tools/perf/testsuite/base_stat/test_basic.sh
create mode 100755 tools/perf/testsuite/base_stat/test_hw.sh
create mode 100755 tools/perf/testsuite/base_stat/test_hwcache.sh
create mode 100755 tools/perf/testsuite/base_stat/test_intel_uncore.sh
create mode 100755 tools/perf/testsuite/base_stat/test_powerpc_hv_24x7.sh
create mode 100755 tools/perf/testsuite/base_stat/test_tracepoints_definition.sh
diff --git a/tools/perf/testsuite/base_stat/cleanup.sh b/tools/perf/testsuite/base_stat/cleanup.sh
new file mode 100755
index 0000000..2405f845
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/cleanup.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+. ../common/init.sh
+. ./settings.sh
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+find . -name \*.log | xargs -r rm
+test -d hw && rmdir hw
+test -d hwcache && rmdir hwcache
+test -d hv_24x7 && rmdir hv_24x7
+test -d intel_uncore && rmdir intel_uncore
+print_overall_results 0
+exit 0
diff --git a/tools/perf/testsuite/base_stat/settings.sh b/tools/perf/testsuite/base_stat/settings.sh
new file mode 100644
index 0000000..1cf94ba
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/settings.sh
@@ -0,0 +1,25 @@
+#
+# settings.sh of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_stat"
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
diff --git a/tools/perf/testsuite/base_stat/test_basic.sh b/tools/perf/testsuite/base_stat/test_basic.sh
new file mode 100755
index 0000000..6acc42b
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_basic.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+#
+# test_basic of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests basic functionality of the perf stat command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+#### basic execution
+
+# test that perf stat is even working
+$CMD_PERF stat $CMD_SIMPLE 2> $LOGS_DIR/01.log
+PERF_EXIT_CODE=$?
+
+REGEX_HEADER="\s*Performance counter stats for 'true':"
+REGEX_LINES="\s*"$RE_NUMBER"\s+"$RE_EVENT"\s+#\s+"$RE_NUMBER"%?.*"
+../common/check_all_patterns_found.pl "$REGEX_HEADER" "$REGEX_LINES" < $LOGS_DIR/01.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+#### some options
+
+# test that some basic options change the behaviour
+$CMD_PERF stat -i -a -c -r 3 -o /dev/stdout -- $CMD_BASIC_SLEEP > $LOGS_DIR/02.log
+PERF_EXIT_CODE=$?
+
+REGEX_HEADER="^\s*Performance counter stats for '(sleep [\d\.]+|system wide)' \(3 runs\):"
+REGEX_LINES="\s*"$RE_NUMBER"\s+"$RE_EVENT"\s+#\s+"$RE_NUMBER"%?.*\s*"$RE_NUMBER"%?.*"
+REGEX_FOOTER="^\s*"$RE_NUMBER" seconds time elapsed.*"
+../common/check_all_patterns_found.pl "$REGEX_HEADER" "$REGEX_LINES" "$REGEX_FOOTER" < $LOGS_DIR/02.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "some options"
+(( TEST_RESULT += $? ))
+
+
+#### CSV output
+
+# with -x'<SEPARATOR>' perf stat should produce a CSV output
+$CMD_PERF stat -x';' -o /dev/stdout -a -- sleep 0.1 > $LOGS_DIR/03.log
+PERF_EXIT_CODE=$?
+
+REGEX_LINES="^"$RE_NUMBER";+"$RE_EVENT
+REGEX_UNSUPPORTED_LINES="^<not supported>;+"$RE_EVENT
+REGEX_METRIC_LINE="stalled\scycles\sper\sinsn"
+../common/check_all_lines_matched.pl "$REGEX_LINES" "$REGEX_METRIC_LINE" "$REGEX_UNSUPPORTED_LINES" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/03.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "CSV output"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_stat/test_hw.sh b/tools/perf/testsuite/base_stat/test_hw.sh
new file mode 100755
index 0000000..9ec5eaf
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_hw.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+#
+# test_hw of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests hardware events by perf stat.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+EVENTS_TO_TEST=`$CMD_PERF list hw | grep "Hardware event" | awk '{print $1}' | egrep '^.' | tr '\n' ' '`
+if [ -z "$EVENTS_TO_TEST" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+# FIXME test -e hw.log && rm -f hw.log
+
+test -d $LOGS_DIR/hw || mkdir $LOGS_DIR/hw
+
+
+#### testing hardware events
+
+for event in $EVENTS_TO_TEST; do
+ $CMD_PERF stat -a -e $event -o $LOGS_DIR/hw/$event.log --append -x';' -- $CMD_BASIC_SLEEP
+ PERF_EXIT_CODE=$?
+ REGEX_LINES="$RE_NUMBER;+$event;$RE_NUMBER;100\.00"
+ ../common/check_all_patterns_found.pl "$REGEX_LINES" < $LOGS_DIR/hw/$event.log
+ CHECK_EXIT_CODE=$?
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "event $event"
+ (( TEST_RESULT += $? ))
+done
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
+
+
+
+# FIXME we should test the numbers
+# FIXME we should be able to blacklist events on some archs (<not supported> is OK (SND, IVB))
diff --git a/tools/perf/testsuite/base_stat/test_hwcache.sh b/tools/perf/testsuite/base_stat/test_hwcache.sh
new file mode 100755
index 0000000..7755fb12
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_hwcache.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+#
+# test_hwcache of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests hardware cache events by perf stat.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+EVENTS_TO_TEST=`$CMD_PERF list hwcache | grep "Hardware cache event" | awk '{print $1}' | egrep '^.' | tr '\n' ' '`
+if [ -z "$EVENTS_TO_TEST" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+test -d $LOGS_DIR/hwcache || mkdir $LOGS_DIR/hwcache
+
+# FIXME test -e hw.log && rm -f hw.log
+
+
+#### testing hardware events
+
+for event in $EVENTS_TO_TEST; do
+ $CMD_PERF stat -a -e $event -o $LOGS_DIR/hwcache/$event.log --append -x';' -- $CMD_BASIC_SLEEP
+ PERF_EXIT_CODE=$?
+ REGEX_LINES="$RE_NUMBER;+$event;$RE_NUMBER;100\.00"
+ ../common/check_all_patterns_found.pl "$REGEX_LINES" < $LOGS_DIR/hwcache/$event.log
+ CHECK_EXIT_CODE=$?
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "event $event"
+ (( TEST_RESULT += $? ))
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
+
+
+
+# FIXME we should test the numbers
+# FIXME we should be able to blacklist events on some archs (<not supported> is OK (SND, IVB))
diff --git a/tools/perf/testsuite/base_stat/test_intel_uncore.sh b/tools/perf/testsuite/base_stat/test_intel_uncore.sh
new file mode 100755
index 0000000..768f86e
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_intel_uncore.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+#
+# test_intel_uncore of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests support of Intel uncore events.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+EVENTS_TO_TEST=`$CMD_PERF list | grep "uncore" | awk '{print $1}' | tr '\n' ' '`
+if [ -z "$EVENTS_TO_TEST" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+test -d $LOGS_DIR/intel_uncore || mkdir $LOGS_DIR/intel_uncore
+
+
+#### testing Intel uncore events
+
+for event in $EVENTS_TO_TEST; do
+ EVENT_NAME=`echo $event | tr '/' '_' | tr ',' '-'`
+ $CMD_PERF stat -a -e $event -o $LOGS_DIR/intel_uncore/$EVENT_NAME.log -x';' -- $CMD_QUICK_SLEEP
+ PERF_EXIT_CODE=$?
+
+ REGEX_LINES="$RE_NUMBER;[^;]*;$RE_EVENT_ANY;$RE_NUMBER;100\.00"
+ ../common/check_all_patterns_found.pl "$REGEX_LINES" < $LOGS_DIR/intel_uncore/$EVENT_NAME.log
+ CHECK_EXIT_CODE=$?
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "event $event"
+ (( TEST_RESULT += $? ))
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_stat/test_powerpc_hv_24x7.sh b/tools/perf/testsuite/base_stat/test_powerpc_hv_24x7.sh
new file mode 100755
index 0000000..2d91d02
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_powerpc_hv_24x7.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#
+# test_powerpc_hv24x7 of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests PowerPC hv_24x7 events by perf stat.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+EVENTS_TO_TEST=`$CMD_PERF list | grep "24x7" | grep "core" | awk '{print $1}' | tr '\n' ' '`
+if [ -z "$EVENTS_TO_TEST" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+test -d $LOGS_DIR/hv_24x7 || mkdir $LOGS_DIR/hv_24x7
+
+if [ "$PARAM_STAT_24x7_ALL_CORES" = "y" ]; then
+ NPROC=`nproc`
+ CORES_TO_TEST="`seq 0 $((NPROC-1))`"
+else
+ CORES_TO_TEST="0"
+fi
+
+#### testing hv_24x7 events
+
+for event in $EVENTS_TO_TEST; do
+ EVENT_NAME=`echo $event | awk -F',' '{print $1}' | awk -F'/' '{print $2}'`
+ PERF_EXIT_CODE=0
+ for core in $CORES_TO_TEST; do
+ evt=`echo $event | sed "s/\?/$core/"`
+ $CMD_PERF stat -a -e $evt -o $LOGS_DIR/hv_24x7/$EVENT_NAME.log --append -x';' -- $CMD_QUICK_SLEEP
+ (( PERF_EXIT_CODE += $? ))
+ done
+ REGEX_LINES="$RE_NUMBER;+hv_24x7\/$EVENT_NAME,core=$RE_NUMBER\/;$RE_NUMBER;100\.00"
+ ../common/check_all_patterns_found.pl "$REGEX_LINES" < $LOGS_DIR/hv_24x7/$EVENT_NAME.log
+ CHECK_EXIT_CODE=$?
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "event $EVENT_NAME"
+ (( TEST_RESULT += $? ))
+done
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
+
+
+
+# FIXME we should test the numbers
+# FIXME add lpar/vcpu events? maybe configurable
+# FIXME "You have POWER8 LPAR, so I think you should have hv24x7 but you do not!" warning
diff --git a/tools/perf/testsuite/base_stat/test_tracepoints_definition.sh b/tools/perf/testsuite/base_stat/test_tracepoints_definition.sh
new file mode 100755
index 0000000..ca5ac9d
--- /dev/null
+++ b/tools/perf/testsuite/base_stat/test_tracepoints_definition.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+#
+# test_tracepoints_definition.sh of perf_stat test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks the tracepoint definitions for
+# syntax errors. Since it takes a long time, it can
+# be disabled by an option in common/parametrization.sh
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+if [ ! "$PARAM_STAT_TRACEPOINT_EVENTS_SYNTAX" == "y" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+test -d $LOGS_DIR/tracepoint_def || mkdir -p $LOGS_DIR/tracepoint_def
+
+# remove old logs
+rm -f $LOGS_DIR/tracepoint_def/tracepoints_def_*.log
+
+### check all the tracepoint events of all the available subsystems
+
+SUBSYSTEMS=`$CMD_PERF list tracepoint | grep "Tracepoint event" | awk '{print $1}' | awk -F':' '{print $1}' | sort -u`
+for subs in $SUBSYSTEMS; do
+ TRACEPOINT_EVENTS=`$CMD_PERF list $subs:\* | grep "Tracepoint event" | awk '{print $1}' | tr '\n' ' '`
+ PERF_EXIT_CODE=0
+ for tp in $TRACEPOINT_EVENTS; do
+ $CMD_PERF stat -e $tp -o /dev/stdout true > $LOGS_DIR/tracepoint_def/out 2> $LOGS_DIR/tracepoint_def/err
+ (( PERF_EXIT_CODE += $? ))
+ echo -n "$tp is " >> $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log
+
+ # check whether the event is supported when it is listed
+ grep -qi "not supported" $LOGS_DIR/tracepoint_def/out
+ test $? -eq 0 && echo -n "NOT SUPPORTED and " >> $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log || echo -n "supported and " >> $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log
+
+ # check whether the event causes any warnings
+ test -s $LOGS_DIR/tracepoint_def/err
+ test $? -eq 0 && echo "CAUSES WARNINGS" >> $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log || echo "is defined correctly" >> $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log
+ done
+
+ # check for the results
+ ! grep -e "CAUSES WARNINGS" -e "NOT SUPPORTED" $LOGS_DIR/tracepoint_def/tracepoints_def_$subs.log
+ print_results $PERF_EXIT_CODE $? "subsystem $subs"
+ (( TEST_RESULT += $? ))
+done
+
+rm -f $LOGS_DIR/tracepoint_def/err $LOGS_DIR/tracepoint_def/out
+
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH 9/9] perf test: new testsuite: perf trace tests
[not found] <cover.1458134357.git.mpetlan@redhat.com>
` (7 preceding siblings ...)
2016-03-16 13:57 ` [PATCH 8/9] perf test: new testsuite: perf stat tests Michael Petlan
@ 2016-03-16 13:57 ` Michael Petlan
8 siblings, 0 replies; 21+ messages in thread
From: Michael Petlan @ 2016-03-16 13:57 UTC (permalink / raw)
To: linux-perf-users; +Cc: acme, jolsa
This commit adds tests for the perf trace tool.
Signed-off-by: Michael Petlan <mpetlan@redhat.com>
---
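Note on the common checkers used below (an assumption based on how they
are called here, not on their implementation, which lives in common/): a
rough grep equivalent, taking the basic-execution log as a hypothetical
example and $RE_LINE_TRACE from the suite's common/init.sh:

  # check_all_patterns_found.pl "$RE_LINE_TRACE" < basic_basic.log
  #   -> the pattern must match at least one line of the log
  grep -Pq "$RE_LINE_TRACE" basic_basic.log || echo "pattern missing"

  # check_all_lines_matched.pl "$RE_LINE_TRACE" < basic_basic.log
  #   -> every line of the log must match the pattern
  grep -Pqv "$RE_LINE_TRACE" basic_basic.log && echo "unexpected line found"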
tools/perf/testsuite/base_trace/cleanup.sh | 24 ++++
tools/perf/testsuite/base_trace/settings.sh | 25 ++++
tools/perf/testsuite/base_trace/setup.sh | 16 +++
.../perf/testsuite/base_trace/stderr-whitelist.txt | 1 +
tools/perf/testsuite/base_trace/test_basic.sh | 131 +++++++++++++++++++++
tools/perf/testsuite/base_trace/test_overhead.sh | 51 ++++++++
tools/perf/testsuite/base_trace/test_record.sh | 45 +++++++
7 files changed, 293 insertions(+)
create mode 100755 tools/perf/testsuite/base_trace/cleanup.sh
create mode 100644 tools/perf/testsuite/base_trace/settings.sh
create mode 100755 tools/perf/testsuite/base_trace/setup.sh
create mode 100644 tools/perf/testsuite/base_trace/stderr-whitelist.txt
create mode 100755 tools/perf/testsuite/base_trace/test_basic.sh
create mode 100755 tools/perf/testsuite/base_trace/test_overhead.sh
create mode 100755 tools/perf/testsuite/base_trace/test_record.sh
diff --git a/tools/perf/testsuite/base_trace/cleanup.sh b/tools/perf/testsuite/base_trace/cleanup.sh
new file mode 100755
index 0000000..03984f4
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/cleanup.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#
+# cleanup.sh of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+. ../common/init.sh
+. ./settings.sh
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+find . -name \*.log | xargs -r rm
+find . -name \*.err | xargs -r rm
+rm -f perf.data*
+print_overall_results 0
+exit 0
diff --git a/tools/perf/testsuite/base_trace/settings.sh b/tools/perf/testsuite/base_trace/settings.sh
new file mode 100644
index 0000000..38153fe
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/settings.sh
@@ -0,0 +1,25 @@
+#
+# settings.sh of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+export TEST_NAME="perf_trace"
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
diff --git a/tools/perf/testsuite/base_trace/setup.sh b/tools/perf/testsuite/base_trace/setup.sh
new file mode 100755
index 0000000..8d5ccaa
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/setup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+#
+# setup.sh of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+# FIXME
+#
+#
+
+. ../common/init.sh
+. ./settings.sh
+
+print_overall_results 0
+exit 0
diff --git a/tools/perf/testsuite/base_trace/stderr-whitelist.txt b/tools/perf/testsuite/base_trace/stderr-whitelist.txt
new file mode 100644
index 0000000..d59b483
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/stderr-whitelist.txt
@@ -0,0 +1 @@
+no symbols found in .*, maybe install a debug package
diff --git a/tools/perf/testsuite/base_trace/test_basic.sh b/tools/perf/testsuite/base_trace/test_basic.sh
new file mode 100755
index 0000000..91a3593
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/test_basic.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+#
+# test_basic of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests basic functionality of the perf trace command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+#### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF trace --help > $LOGS_DIR/basic_helpmsg.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-TRACE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "PAGEFAULTS" "EXAMPLES" "SEE ALSO" "NOTES" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "all-cpus" "expr" "output" "pid" "tid" "uid" "verbose" "cpu" "duration" "summary" "sched" "event" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "perf trace record" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+#### basic execution
+
+# test that perf trace is working
+$CMD_PERF trace $CMD_QUICK_SLEEP 2> $LOGS_DIR/basic_basic.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_TRACE" < $LOGS_DIR/basic_basic.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$RE_LINE_TRACE" < $LOGS_DIR/basic_basic.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+### duration threshold
+
+# '--duration X' should show only syscalls that take longer than X ms
+$CMD_PERF trace --duration 80 $CMD_BASIC_SLEEP 2> $LOGS_DIR/basic_duration.log
+PERF_EXIT_CODE=$?
+
+REGEX_SLEEP_SYSCALL_ONLY="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+\w*sleep\(.*\)\s+=\s+\-?$RE_NUMBER|$RE_NUMBER_HEX.*$"
+../common/check_all_lines_matched.pl "$REGEX_SLEEP_SYSCALL_ONLY" < $LOGS_DIR/basic_duration.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "duration threshold"
+(( TEST_RESULT += $? ))
+
+
+### systemwide
+
+# '-a' should trace system-wide from all CPUs
+$CMD_PERF trace -o $LOGS_DIR/basic_systemwide.log -a &
+PERF_PID=$!
+$CMD_LONGER_SLEEP
+kill -SIGINT $PERF_PID
+wait $PERF_PID
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_TRACE" < $LOGS_DIR/basic_systemwide.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "systemwide"
+(( TEST_RESULT += $? ))
+
+
+### full timestamp
+
+# '-T' should print the full timestamp instead of the relative one
+$CMD_PERF trace -T -- $CMD_QUICK_SLEEP 2> $LOGS_DIR/basic_full_timestamp.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_TRACE" "\d{5,}\." < $LOGS_DIR/basic_full_timestamp.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "full timestamp"
+(( TEST_RESULT += $? ))
+
+
+### summary
+
+# '-s' should print out a summary table
+$CMD_PERF trace -s -- $CMD_QUICK_SLEEP 2> $LOGS_DIR/basic_summary.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_EMPTY" "$RE_LINE_TRACE_SUMMARY_HEADER" "$RE_LINE_TRACE_SUMMARY_CONTENT" < $LOGS_DIR/basic_summary.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "summary"
+(( TEST_RESULT += $? ))
+
+
+### attach process
+
+# perf-trace should be able to attach to an existing process via '-p PID'
+$CMD_BASIC_SLEEP &
+$CMD_PERF trace -p $! -o $LOGS_DIR/basic_attach.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_TRACE" < $LOGS_DIR/basic_attach.log
+CHECK_EXIT_CODE=$?
+# perf should report the syscall name even if it attached while the syscall (sleep) was in progress
+../common/check_all_patterns_found.pl "sleep" "close" "exit" < $LOGS_DIR/basic_attach.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "attach process"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_trace/test_overhead.sh b/tools/perf/testsuite/base_trace/test_overhead.sh
new file mode 100755
index 0000000..023aaff
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/test_overhead.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+#
+# test_overhead of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tries to trace a heavier load.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+
+# skip if not enabled
+if [ "$PARAM_TRACE_OVERLOAD" = "n" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+#### systemwide
+
+# system-wide tracing limited by sleep time should finish
+$CMD_PERF trace -o $LOGS_DIR/overhead_systemwide.log -a -- $CMD_LONGER_SLEEP &
+PERF_PID=$!
+$CMD_LONGER_SLEEP
+$CMD_LONGER_SLEEP
+! kill -SIGINT $PERF_PID &> $LOGS_DIR/overhead_systemwide_kill.log
+wait $PERF_PID
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_TRACE" < $LOGS_DIR/overhead_systemwide.log
+CHECK_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "No such process" < $LOGS_DIR/overhead_systemwide_kill.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "systemwide"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/testsuite/base_trace/test_record.sh b/tools/perf/testsuite/base_trace/test_record.sh
new file mode 100755
index 0000000..672f1ea
--- /dev/null
+++ b/tools/perf/testsuite/base_trace/test_record.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+#
+# test_record of perf_trace test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests the sampling mode of perf-trace.
+#
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+
+#### basic execution
+
+# test that perf trace record is working
+$CMD_PERF trace record -o $CURRENT_TEST_DIR/perf.data $CMD_QUICK_SLEEP 2> $LOGS_DIR/record_basic_record.log
+PERF_EXIT_CODE=$?
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/record_basic_report.log 2> $LOGS_DIR/record_basic_report.err
+(( PERF_EXIT_CODE += $? ))
+
+# check the perf record output
+../common/check_all_lines_matched.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/record_basic_record.log
+CHECK_EXIT_CODE=$?
+# check the perf report output
+../common/check_all_lines_matched.pl "$RE_LINE_REPORT_CONTENT" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/record_basic_report.log
+(( CHECK_EXIT_CODE += $? ))
+# check that the perf report stderr is empty
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/record_basic_report.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution - record"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
--
1.8.3.1
^ permalink raw reply related [flat|nested] 21+ messages in thread