From mboxrd@z Thu Jan  1 00:00:00 1970
From: Jasvinder Singh
Subject: [PATCH] ip_pipeline: add script file for pipeline to core mappings
Date: Fri, 6 May 2016 19:32:40 +0100
Message-ID: <1462559560-75937-1-git-send-email-jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, Guruprasad Mukundarao
List-Id: patches and discussions about DPDK

From: Guruprasad Mukundarao

This script parses the application configuration file, detects all the
pipelines specified therein and then generates all possible mappings of
those pipelines onto the specified CPU core list. Each of the possible
pipeline-to-core mappings is saved as a separate output configuration
file.

For example, if the input file is edge_router_downstream.cfg with 3
pipelines (excluding pipeline 0) and the core list is "1, 2", the
following combinations are generated:

Pipeline 1    Pipeline 2    Pipeline 3
Core = 1      Core = 1      Core = 2
Core = 1      Core = 2      Core = 1
Core = 2      Core = 1      Core = 1
Core = 2      Core = 2      Core = 1
Core = 2      Core = 1      Core = 2
Core = 1      Core = 2      Core = 2
Core = C1     Core = C1H    Core = C2
Core = C1     Core = C2     Core = C1H
Core = C2     Core = C1     Core = C1H

This script helps users analyse application performance by evaluating
all the generated configuration files with different pipeline-to-core
mappings and selecting the configuration file that performs best.

Signed-off-by: Guruprasad Mukundarao
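[Reviewer illustration, not part of the patch.] The enumeration described
above is equivalent to listing every assignment of pipelines to cores in
which each core from the input list hosts at least one pipeline, as in the
six numeric rows of the table. A minimal sketch of that idea follows; the
helper name enumerate_mappings is hypothetical, and the patch itself
implements the search in three stages (stage 0 splits the pipeline count
per core, stage 1 assigns concrete pipelines per core, stage 2 splits each
core's pipelines across its two hyper-threads):

    #!/usr/bin/env python3
    # Illustrative sketch only; enumerate_mappings is not a function
    # defined by the patch below.
    import itertools

    def enumerate_mappings(n_pipelines, cores):
        # Yield one tuple per mapping; entry i is the core running pipeline i+1.
        for combo in itertools.product(cores, repeat=n_pipelines):
            if set(combo) == set(cores):  # every core hosts at least one pipeline
                yield combo

    for combo in enumerate_mappings(3, [1, 2]):
        print('  '.join('Pipeline %d: Core = %d' % (i + 1, c)
                        for i, c in enumerate(combo)))

Running this prints the same six mappings shown in the commit message.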
---
 .../ip_pipeline/config/pipeline-to-core-mapping.py | 990 ++++++++++++++++++++
 1 file changed, 990 insertions(+)
 create mode 100644 examples/ip_pipeline/config/pipeline-to-core-mapping.py

diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
new file mode 100644
index 0000000..6fdac91
--- /dev/null
+++ b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -0,0 +1,990 @@
+#! /usr/bin/python3
+
+#   BSD LICENSE
+#
+#   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# This script maps the set of pipelines identified (MASTER pipelines are
+# ignored) from the input configuration file to the set of cores
+# provided as an input argument and creates configuration files for each
+# of the mapping combinations.
+#
+
+import sys
+import array
+import itertools
+import re
+import argparse
+import os
+from collections import namedtuple
+
+#default values
+enable_stage0_traceout = 1
+enable_stage1_traceout = 1
+enable_stage2_traceout = 1
+
+#enable_stage0_fileout = 0
+enable_stage1_fileout = 1
+enable_stage2_fileout = 1
+
+#pattern for physical core
+pattern_phycore = '^(s|S)\d(c|C)[1-9][0-9]*$'
+reg_phycore = re.compile(pattern_phycore)
+
+#------------------------------------------------------------------------------
+def popcount(mask):
+    return bin(mask).count("1")
+
+#------------------------------------------------------------------------------
+def len2mask(length):
+    if (length == 0):
+        return 0
+
+    if (length > 64):
+        sys.exit('error: len2mask - length %i > 64. exiting' %length)
+
+    return (0xFFFFFFFFFFFFFFFF >> (64 - length))
+
+#------------------------------------------------------------------------------
+def bitstring_write(n, n_bits):
+    tmpstr = ""
+    if (n_bits > 64):
+        return
+
+    i = n_bits - 1
+    while (i >= 0):
+        cond = (n & (1 << i))
+        if (cond):
+            print('1', end='')
+            tmpstr += '1'
+        else:
+            print('0', end='')
+            tmpstr += '0'
+        i -= 1
+    #end while
+    return tmpstr
+#end function
+
+#--------------------------------------------------------------------------
+Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
+constants = Constants(16, 64)
+
+
+#--------------------------------------------------------------------------
+class Cores0:
+    def __init__(self):
+        self.n_pipelines = 0
+
+class Cores1:
+    def __init__(self):
+        self.pipelines = 0
+        self.n_pipelines = 0
+
+class Cores2:
+    def __init__(self):
+        self.pipelines = 0
+        self.n_pipelines = 0
+        self.counter = 0
+        self.counter_max = 0
+        self.bitpos = array.array("L", itertools.repeat(0, constants.MAX_PIPELINES))
+
+
+#--------------------------------------------------------------------
+class Context0:
+    def __init__(self):
+        self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
+        self.n_cores = 0
+        self.n_pipelines = 0
+        self.n_pipelines0 = 0
+        self.pos = 0
+        self.file_comment = ""
+        self.c1 = None
+        self.c2 = None
+
+    #-------------------------------------------------------------
+    def stage0_print(self):
+        print('printing Context0 obj')
+        print('c0.cores(n_pipelines) = [ ', end='')
+        for cores_count in range(0, constants.MAX_CORES):
+            print(self.cores[cores_count].n_pipelines, end=' ')
+        print(']')
+        print('c0.n_cores = %d' %self.n_cores)
+        print('c0.n_pipelines = %d' %self.n_pipelines)
+        print('c0.n_pipelines0 = %d' %self.n_pipelines0)
+        print('c0.pos = %d' %self.pos)
+        print('c0.file_comment = %s'
%self.file_comment) + if (self.c1 is not None): + print('c0.c1 =3D ', end=3D'') + print(repr(self.c1)) + else: + print('c0.c1 =3D None') + + if (self.c2 is not None): + print('c0.c2 =3D ', end=3D'') + print(repr(self.c2)) + else: + print('c0.c2 =3D None') + + =20 + #------------------------------------------------------------- + def stage0_init(self, num_cores, num_pipelines, c1, c2): + self.n_cores =3D num_cores + self.n_pipelines =3D num_pipelines + self.c1 =3D c1 + self.c2 =3D c2 + + #------------------------------------------------------------- + def stage0_process(self): + #print('inside stage0_process') + + # stage0 init + self.cores[0].n_pipelines =3D self.n_pipelines + self.n_pipelines0 =3D 0 + self.pos =3D 1 + + while True: + #go forward + while True: + if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0= )): + self.cores[self.pos].n_pipelines =3D min(self.cores[= self.pos - 1].n_pipelines, self.n_pipelines0) + self.n_pipelines0 -=3D self.cores[self.pos].n_pipeli= nes + =20 + self.pos +=3D 1 + else: + break + + #end while + =20 + # check solution + if (self.n_pipelines0 =3D=3D 0): + self.stage0_log() + self.c1.stage1_init(self, self.c2) # self is object c0 + self.c1.stage1_process() + =20 + # go backward + while True: + if (self.pos =3D=3D 0): + return + + self.pos -=3D 1 + if ((self.cores[self.pos].n_pipelines >1) and + (self.pos !=3D (self.n_cores -1))): + break + =20 + self.n_pipelines0 +=3D self.cores[self.pos].n_pipelines + self.cores[self.pos].n_pipelines =3D 0 + #end while + + # rearm + self.cores[self.pos].n_pipelines -=3D 1 + self.n_pipelines0 +=3D 1 + self.pos +=3D 1 + #end while =20 + #end function =20 + + + #------------------------------------------------------------- + def stage0_log(self): + tmp_file_comment =3D "" + if(enable_stage0_traceout !=3D 1): + return + + print('STAGE0: ', end=3D'') + tmp_file_comment +=3D 'STAGE0: ' + for cores_count in range(0, self.n_cores): + print('C%d =3D %d\t' \ + %(cores_count, self.cores[cores_count].n_pipelines), end=3D'= ') + tmp_file_comment +=3D "C{} =3D {}\t".format(cores_count, \ + self.cores[cores_count].n_pipelines) + #end for + print('') + self.c1.stage0_file_comment =3D tmp_file_comment + self.c2.stage0_file_comment =3D tmp_file_comment + + # end function + +#end class Context0 + + +#------------------------------------------------------------- +class Context1: + #class attribute + _fileTrace =3D None + + def __init__(self): + self.cores =3D [Cores1() for i in range(constants.MAX_CORES)] + self.n_cores =3D 0 + self.n_pipelines =3D 0 + self.pos =3D 0 + self.stage0_file_comment =3D "" + self.stage1_file_comment =3D "" + + self.c2 =3D None + self.arr_pipelines2cores =3D [] + #end init + + #------------------------------------------------------------- + def stage1_reset(self): + for i in range(constants.MAX_CORES): + self.cores[i].pipelines =3D 0 + self.cores[i].n_pipelines =3D 0 + =20 + self.n_cores =3D 0 + self.n_pipelines =3D 0 + self.pos =3D 0 + self.c2 =3D None + self.arr_pipelines2cores.clear() + #end def stage1_reset + + #------------------------------------------------------------- + def stage1_print(self): + print('printing Context1 obj') + print('c1.cores(pipelines,n_pipelines) =3D [ ', end=3D'') + for cores_count in range(0, constants.MAX_CORES): + print('(%d,%d)' %(self.cores[cores_count].pipelines,=20 + self.cores[cores_count].n_pipelines), end=3D= ' ') + print(']') + print('c1.n_cores =3D %d' %self.n_cores) + print('c1.n_pipelines =3D %d' %self.n_pipelines) + print('c1.pos =3D %d' %self.pos) + 
print('c1.stage0_file_comment =3D %s' %self.stage0_file_comment) + print('c1.stage1_file_comment =3D %s' %self.stage1_file_comment) + if (self.c2 is not None): + print('c1.c2 =3D ', end=3D'') + print(self.c2) + else: + print('c1.c2 =3D None') + #end stage1_print + + #------------------------------------------------------------- + def stage1_init(self, c0, c2): + self.stage1_reset() =20 + self.n_cores =3D 0 + while (c0.cores[self.n_cores].n_pipelines > 0): + self.n_cores +=3D 1 + =20 + self.n_pipelines =3D c0.n_pipelines + self.c2 =3D c2 + + self.arr_pipelines2cores =3D [0] * self.n_pipelines + + i =3D 0 + while (i < self.n_cores): + self.cores[i].n_pipelines =3D c0.cores[i].n_pipelines + i +=3D 1 + #end while + #end stage1_init + #------------------------------------------------------------- + def stage1_process(self): + pipelines_max =3D len2mask(self.n_pipelines) + + while True: + pos =3D 0 + overlap =3D 0 + + if (self.cores[self.pos].pipelines =3D=3D pipelines_max): + if (self.pos =3D=3D 0): + return + + self.cores[self.pos].pipelines =3D 0 + self.pos -=3D 1 + continue + #end if + + self.cores[self.pos].pipelines +=3D 1 + =20 + if (popcount(self.cores[self.pos].pipelines) \ + !=3D self.cores[self.pos].n_pipelines): + continue + + overlap =3D 0 + pos =3D 0 + while (pos < self.pos): + if ((self.cores[self.pos].pipelines) & \ + (self.cores[pos].pipelines)): + overlap =3D 1 + break + pos +=3D 1 + #end while + + if (overlap): + continue + + =20 + if ((self.pos > 0) and \ + ((self.cores[self.pos].n_pipelines) =3D=3D (self.cores[se= lf.pos - 1].n_pipelines)) and \ + ((self.cores[self.pos].pipelines) < (self.cores[self.pos = - 1].pipelines))): + continue + + if (self.pos =3D=3D self.n_cores - 1): + self.stage1_log() + self.c2.stage2_init(self) + self.c2.stage2_process() + + if (self.pos =3D=3D 0): + return + + self.cores[self.pos].pipelines =3D 0 + self.pos -=3D 1 + continue + #endif + + self.pos +=3D 1 + #end for + #end stage1_process + =20 + =20 + #------------------------------------------------------------- + def stage1_log(self): + tmp_file_comment =3D "" + if(enable_stage1_traceout =3D=3D 1): + print('STAGE1: ', end =3D '') + tmp_file_comment +=3D 'STAGE1: ' + i =3D 0 + while (i < self.n_cores): + print('C%d =3D [' %i, end=3D'') + tmp_file_comment +=3D "C{} =3D [".format(i) + + j =3D self.n_pipelines - 1 + while (j >=3D 0): + cond =3D ((self.cores[i].pipelines) & (1 << j)) + if (cond): + print('1', end=3D'') + tmp_file_comment +=3D '1' + else: + print('0', end=3D'') + tmp_file_comment +=3D '0' + j -=3D 1 + =20 + print(']\t', end=3D'') + tmp_file_comment +=3D ']\t' + i +=3D 1 + #end while + print('\n', end =3D'') + #tmp_file_comment +=3D '\n' + self.stage1_file_comment =3D tmp_file_comment + self.c2.stage1_file_comment =3D tmp_file_comment + #endif + + #check if file traceing is enabled =20 + if(enable_stage1_fileout !=3D 1): + return + + #spit out the combination to file + self.stage1_process_file() + #end function stage1_log + + + #-------------------------------------------------------------------= -----=20 + def stage1_updateCoresInBuf(self, nPipeline, sCore): + + rePipeline =3D self._fileTrace.arr_pipelines[nPipeline] + rePipeline =3D rePipeline.replace("[","\[").replace("]","\]") + reCore =3D 'core\s*=3D\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\= n' + sSubs =3D 'core =3D ' + sCore + '\n' + + reg_pipeline =3D re.compile(rePipeline) + search_match =3D reg_pipeline.search(self._fileTrace.in_buf) + #debug + #print(search_match) + + if(search_match): + pos =3D search_match.start() + 
substr1 =3D self._fileTrace.in_buf[:pos] + substr2 =3D self._fileTrace.in_buf[pos:] + substr2 =3D re.sub(reCore, sSubs, substr2,1) + self._fileTrace.in_buf =3D substr1 + substr2 + #endif + #end function stage1_updateCoresInBuf + + #-------------------------------------------------------------------= -----=20 + def stage1_process_file(self): + outFileName =3D os.path.join(self._fileTrace.out_path, \ + self._fileTrace.prefix_outfile) + =20 + i =3D 0 #represents core number + while (i < self.n_cores): + outFileName +=3D '_' + outFileName +=3D str(self.cores[i].pipelines) + + j =3D self.n_pipelines - 1 + pipeline_idx =3D 0 + while(j >=3D 0): + cond =3D ((self.cores[i].pipelines) & (1 << j)) + if (cond): + #update the pipelines array to match the core + # only in case of cond match + self.arr_pipelines2cores[pipeline_idx] =3D fileTrace= .in_physical_cores[i] + #endif + j -=3D 1 + pipeline_idx +=3D 1 + #end while + i +=3D 1 + #end while + + # update the in_buf as per the arr_pipelines2cores =20 + for pipeline_idx in range(len(self.arr_pipelines2cores)): + self.stage1_updateCoresInBuf(pipeline_idx,self.arr_pipelines= 2cores[pipeline_idx]) + + =20 + #by now the in_buf is all set to be written to file=20 + outFileName +=3D self._fileTrace.suffix_outfile + outputFile =3D open(outFileName, "w") + =20 + #write out the comments=20 + outputFile.write("; =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= Pipeline-to-Core Mapping =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= \n") + outputFile.write("; Generated from file {}\n".format(self._fileT= race.in_file_namepath)) + outputFile.write("; Input pipelines =3D {}\n; Input cores =3D {}= \n" \ + .format(fileTrace.arr_pipelines, fileTrace.in_physical_c= ores)) + + strTruncated =3D ("", "(Truncated)") [self._fileTrace.ncores_tru= ncated] + outputFile.write("; N_PIPELINES =3D {} N_CORES =3D {} {}\n"\ + .format(self._fileTrace.n_pipelines, self._fileTrace.n_c= ores, strTruncated)) + + outputFile.write("; {}\n".format(self.stage0_file_comment)) #sta= ge0 comment + outputFile.write("; {}\n".format(self.stage1_file_comment)) #sta= ge1 comment + #debugging + #outputFile.write("; <<<>>>= ") + #outputFile.write("; stage1_arr_pipelines2cores =3D {}".format(s= elf.arr_pipelines2cores)) + outputFile.write("; =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D\n") + outputFile.write(";\n") + + # + outputFile.write(self._fileTrace.in_buf) + outputFile.flush() + outputFile.close() + #end function stage1_process_file +# end class Context1 + + +#-----------------------------------------------------------------------= ------=20 +class Context2: + #class attribute + _fileTrace =3D None + + def __init__(self): + self.cores =3D [Cores2() for i in range(constants.MAX_CORES)] + self.n_cores =3D 0 + self.n_pipelines =3D 0 + self.pos =3D 0 + self.stage0_file_comment =3D "" + self.stage1_file_comment =3D "" + self.stage2_file_comment =3D "" + + #each array entry is a pipeline mapped to core stored as string + # pipeline ranging from 1 to n, however stored in zero based arr= ay + self.arr2_pipelines2cores =3D [] + + #------------------------------------------------------------- + def stage2_print(self): + print('printing Context2 obj') + print('c2.cores(pipelines, n_pipelines, counter, counter_max) =3D= ') + for cores_count in range(0, constants.MAX_CORES): + print('core[%d] =3D (%d,%d,%d,%d)' %(cores_count, + self.cores[cores_count].pipelines, \ + 
self.cores[cores_count].n_pipelines,= \ + self.cores[cores_count].counter, \ + self.cores[cores_count].counter_max)= ) + + print('c2.n_cores =3D %d' %self.n_cores, end=3D'') + print('c2.n_pipelines =3D %d' %self.n_pipelines, end=3D'') + print('c2.pos =3D %d' %self.pos) + print('c2.stage0_file_comment =3D %s' %self.self.stage0_file= _comment) + print('c2.stage1_file_comment =3D %s' %self.self.stage1_file= _comment) + print('c2.stage2_file_comment =3D %s' %self.self.stage2_file= _comment) + #end for + #end function stage2_print + =20 + + #------------------------------------------------------------- + def stage2_reset(self): + for i in range(0, constants.MAX_CORES): + self.cores[i].pipelines =3D 0 + self.cores[i].n_pipelines =3D 0; + self.cores[i].counter =3D 0; + self.cores[i].counter_max =3D 0 + =20 + for idx in range(0, constants.MAX_PIPELINES): + self.cores[i].bitpos[idx] =3D 0 + + self.n_cores =3D 0 + self.n_pipelines =3D 0 + self.pos =3D 0 + + self.arr2_pipelines2cores.clear()=20 + #end stage2_reset + + #------------------------------------------------------------- + def bitpos_load(self, coreidx): + i =3D j =3D 0 + while (i < self.n_pipelines): + if ((self.cores[coreidx].pipelines) & \ + (1 << i)): + self.cores[coreidx].bitpos[j] =3D i + j +=3D 1 + i +=3D 1 + self.cores[coreidx].n_pipelines =3D j + + + #------------------------------------------------------------- + def bitpos_apply(self, in_buf, pos, n_pos): + out =3D 0 + for i in range(0, n_pos): + out |=3D (in_buf & (1 << i)) << (pos[i] - i) + + return out + + + #------------------------------------------------------------- + def stage2_init(self, c1): + self.stage2_reset() + self.n_cores =3D c1.n_cores + self.n_pipelines =3D c1.n_pipelines + + self.arr2_pipelines2cores =3D [''] * self.n_pipelines + + core_idx =3D 0 + while (core_idx < self.n_cores): + self.cores[core_idx].pipelines =3D c1.cores[core_idx].pipeli= nes + + self.bitpos_load(core_idx) + core_idx +=3D 1 + #end while=20 + #end function stage2_init + + + #------------------------------------------------------------- + def stage2_log(self): + tmp_file_comment =3D "" + if(enable_stage2_traceout =3D=3D 1): + print('STAGE2: ', end=3D'') + tmp_file_comment +=3D 'STAGE2: ' + + for i in range(0, self.n_cores): + mask =3D len2mask(self.cores[i].n_pipelines) + pipelines_ht0 =3D self.bitpos_apply((~self.cores[i].coun= ter) & mask, \ + self.cores[i].bitpos, \ + self.cores[i].n_pipelines) + + pipelines_ht1 =3D self.bitpos_apply(self.cores[i].counte= r, \ + self.cores[i].bitpos, \ + self.cores[i].n_pipelines) + + print('C%dHT0 =3D [' %i, end=3D'') + tmp_file_comment +=3D "C{}HT0 =3D [".format(i) + tmp_file_comment +=3D bitstring_write(pipelines_ht0, sel= f.n_pipelines) + + print(']\tC%dHT1 =3D [' %i, end=3D'') + tmp_file_comment +=3D "]\tC{}HT1 =3D [".format(i) + tmp_file_comment +=3D bitstring_write(pipelines_ht1, sel= f.n_pipelines) + print(']\t', end=3D'') + tmp_file_comment +=3D ']\t' + #end for + print('') + self.stage2_file_comment =3D tmp_file_comment + #endif + + #check if file traceing is enabled =20 + if(enable_stage2_fileout !=3D 1): + return=20 + #spit out the combination to file + self.stage2_process_file() + + #end function stage2_log + + #------------------------------------------------------------- + def stage2_updateCoresInBuf(self, nPipeline, sCore): + rePipeline =3D self._fileTrace.arr_pipelines[nPipeline] + rePipeline =3D rePipeline.replace("[","\[").replace("]","\]") + reCore =3D 'core\s*=3D\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\= n' + sSubs =3D 'core =3D ' + 
sCore + '\n' + #sSubs =3D 'core =3D ' + self._fileTrace.in_physical_cores[sCore= ] + '\n' + + reg_pipeline =3D re.compile(rePipeline) + search_match =3D reg_pipeline.search(self._fileTrace.in_buf) + + if(search_match): + pos =3D search_match.start() + substr1 =3D self._fileTrace.in_buf[:pos] + substr2 =3D self._fileTrace.in_buf[pos:] + substr2 =3D re.sub(reCore, sSubs, substr2,1) + self._fileTrace.in_buf =3D substr1 + substr2 + #endif + #end function stage2_updateCoresInBuf + + + #------------------------------------------------------------- + def pipelines2cores(self, n, n_bits, nCore, bHT): + if (n_bits > 64): + return + + i =3D n_bits - 1 + pipeline_idx =3D 0 + while (i >=3D 0): + cond =3D (n & (1 << i)) + if (cond): + #update the pipelines array to match the core + # only in case of cond match + # PIPELINE0 and core 0 are reserved + if(bHT): + tmpCore =3D fileTrace.in_physical_cores[nCore] + 'h' + self.arr2_pipelines2cores[pipeline_idx] =3D tmpCore + else: + self.arr2_pipelines2cores[pipeline_idx] =3D \ + fileTrace.in_physical_cores[nCore] + #endif + #endif + i -=3D 1 + pipeline_idx +=3D 1 + #end while + + #end function pipelines2cores + + #------------------------------------------------------------- + def stage2_process_file(self): + outFileName =3D os.path.join(self._fileTrace.out_path, \ + self._fileTrace.prefix_outfile) + + for i in range(0, self.n_cores): + mask =3D len2mask(self.cores[i].n_pipelines) + pipelines_ht0 =3D self.bitpos_apply((~self.cores[i].counter)= & mask, \ + self.cores[i].bitpos, \ + self.cores[i].n_pipelines) + + pipelines_ht1 =3D self.bitpos_apply(self.cores[i].counter, \ + self.cores[i].bitpos, \ + self.cores[i].n_pipelines) + =20 + outFileName +=3D '_' + outFileName +=3D str(pipelines_ht0) + outFileName +=3D '_' + outFileName +=3D str(pipelines_ht1) + outFileName +=3D 'HT' =20 + + #update pipelines to core mapping + self.pipelines2cores(pipelines_ht0, self.n_pipelines, i , Fa= lse) + self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, Tru= e) + #end for + + # update the in_buf as per the arr_pipelines2cores=20 + for pipeline_idx in range(len(self.arr2_pipelines2cores)): + self.stage2_updateCoresInBuf(pipeline_idx, self.arr2_pipelin= es2cores[pipeline_idx]) + =20 + #by now the in_buf is all set to be written to file=20 + outFileName +=3D self._fileTrace.suffix_outfile + outputFile =3D open(outFileName, "w") + + #write out the comments + outputFile.write("; =3D=3D=3D=3D=3D=3D=3D=3D=3D Pipeline-to-Core= Mapping =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D\= n") + outputFile.write("; Generated from file {}\n".format(self._fileT= race.in_file_namepath)) + outputFile.write("; Input pipelines =3D {}\n; Input cores =3D {}= \n" \ + .format(fileTrace.arr_pipelines, fileTrace.in_physical_c= ores)) + + strTruncated =3D ("", "(Truncated)") [self._fileTrace.ncores_tru= ncated] + outputFile.write("; N_PIPELINES =3D {} N_CORES =3D {} {}\n"\ + .format(self._fileTrace.n_pipelines, self._fileTrace.n_c= ores, strTruncated)) + + outputFile.write("; {}\n".format(self.stage0_file_comment)) #sta= ge0 comment + outputFile.write("; {}\n".format(self.stage1_file_comment)) #sta= ge1 comment + outputFile.write("; {}\n".format(self.stage2_file_comment)) #sta= ge2 comment + outputFile.write("; =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D\n") + outputFile.write(";\n") + + outputFile.write(self._fileTrace.in_buf) + outputFile.flush() + 
outputFile.close() + + #end function stage2_process_file=20 + + #------------------------------------------------------------- + def stage2_process(self): + i =3D 0 + while(i < self.n_cores): + self.cores[i].counter_max =3D len2mask(self.cores[i].n_pipel= ines - 1) + i +=3D 1 + #end while + =20 + self.pos =3D self.n_cores - 1 + while True: + if (self.pos =3D=3D self.n_cores - 1): + self.stage2_log() + + if (self.cores[self.pos].counter =3D=3D self.cores[self.pos]= .counter_max): + if (self.pos =3D=3D 0): + return + #endif + + self.cores[self.pos].counter =3D 0 + self.pos -=3D 1 + continue + #endif + + self.cores[self.pos].counter +=3D 1 + + if(self.pos < self.n_cores - 1): + self.pos +=3D 1 + #endif + #end while +#end class Context2 + +#-----------------------------------------------------------------------= ------=20 +class FileTrace: + #initialize default parameters + def __init__(self,filenamepath): + self.in_file_namepath =3D os.path.abspath(filenamepath) + self.in_filename =3D os.path.basename(self.in_file_namepath)=20 + self.in_path =3D os.path.dirname(self.in_file_namepath) + =20 + #set output folder same as input + self.out_path =3D self.in_path + + filenamesplit =3D self.in_filename.split('.') + self.prefix_outfile =3D filenamesplit[0] + self.suffix_outfile =3D ".cfg" + self.in_buf =3D None + self.arr_pipelines =3D [] # holds the positions of search + =20 + self.max_cores =3D 15=20 + self.max_pipelines =3D 15 + + self.in_physical_cores =3D None + self.hyper_thread =3D None + + # save the num of pipelines determined from input file + self.n_pipelines =3D 0 + # save the num of cores input (or the truncated value)=20 + self.n_cores =3D 0 + self.ncores_truncated =3D False + =20 + #end init + + def print_TraceFile(self): + print("self.in_file_namepath =3D ", self.in_file_namepath)=20 + print("self.in_filename =3D ", self.in_filename) + print("self.in_path =3D ", self.in_path) + print("self.out_path =3D ", self.out_path) + print("self.prefix_outfile =3D ", self.prefix_outfile) + print("self.suffix_outfile =3D ", self.suffix_outfile) + print("self.in_buf =3D ", self.in_buf) + print("self.arr_pipelines =3D", self.arr_pipelines) + print("self.in_physical_cores", self.in_physical_cores) + print("self.hyper_thread", self.hyper_thread) + #end function print_TraceFile + +#end class FileTrace + + +#-----------------------------------------------------------------------= ------ +# main process method +# +def process(n_cores, n_pipelines, fileTrace): + if (n_cores =3D=3D 0): + sys.exit('N_CORES is 0, exiting') + #endif + + if (n_pipelines =3D=3D 0): + sys.exit('N_PIPELINES is 0, exiting') + #endif + + if (n_cores > n_pipelines): + print('\nToo many cores, truncating N_CORES to N_PIPELINES') + n_cores =3D n_pipelines + fileTrace.ncores_truncated =3D True + #endif + fileTrace.n_pipelines =3D n_pipelines + fileTrace.n_cores =3D n_cores + + strTruncated =3D ("", "(Truncated)") [fileTrace.ncores_truncated] + print("N_PIPELINES =3D {}, N_CORES =3D {} {}" \ + .format(n_pipelines,n_cores, strTruncated)) + print("-------------------------------------------------------------= --") + + c0 =3D Context0() + c1 =3D Context1() + c2 =3D Context2() + + #initialize the class variables + c1._fileTrace =3D fileTrace + c2._fileTrace =3D fileTrace + + c0.stage0_init(n_cores, n_pipelines, c1, c2) + c0.stage0_process() + +#end function process + + + +#-----------------------------------------------------------------------= --- +def validate_core(core): + match =3D reg_phycore.match(core) + if(match): + return True + 
+    else:
+        return False
+    #endif
+#end function validate_core
+
+
+#---------------------------------------------------------------------------
+def validate_phycores(phy_cores):
+    #eat up whitespaces
+    phy_cores = phy_cores.strip().split(',')
+
+    #check if the core list is unique
+    if(len(phy_cores) != len(set(phy_cores))):
+        print('list of physical cores has duplicates')
+        return False
+    #endif
+
+    # every core in the list must be valid before the list is accepted
+    for core in phy_cores:
+        if(validate_core(core) != True):
+            print('invalid physical core specified.')
+            return None
+        #endif
+    #endfor
+
+    return phy_cores
+#end function validate_phycores
+
+#---------------------------------------------------------------------------
+def scanconfigfile(fileTrace):
+    #debug
+    #fileTrace.print_TraceFile()
+
+    # open file
+    filetoscan = open(fileTrace.in_file_namepath, 'r')
+    fileTrace.in_buf = filetoscan.read()
+
+    #reset iterator on open file
+    filetoscan.seek(0)
+
+    # scan input file for pipelines
+    # master pipelines to be ignored
+    pattern_pipeline = r'\[PIPELINE\d*\]'
+    pattern_mastertype = r'type\s*=\s*MASTER'
+
+    pending_pipeline = False
+    for line in filetoscan:
+        match_pipeline = re.search(pattern_pipeline, line)
+        match_type = re.search('type\s*=', line)
+        match_mastertype = re.search(pattern_mastertype, line)
+
+        if(match_pipeline):
+            sPipeline = line[match_pipeline.start():match_pipeline.end()]
+            #sPipeline = sPipeline.strip('[]')
+            pending_pipeline = True
+        elif(match_type):
+            # found a type definition...
+            if(match_mastertype == None):
+                # and this is not a master pipeline...
+                if(pending_pipeline == True):
+                    # add it to the list of pipelines to be mapped
+                    fileTrace.arr_pipelines.append(sPipeline)
+                    pending_pipeline = False
+            else:
+                # and this is a master pipeline...
+                # ignore the current and move on to next
+                sPipeline = ""
+                pending_pipeline = False
+            #endif
+        #endif
+    #endfor
+    filetoscan.close()
+
+    # validate if pipelines are unique
+    if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
+        sys.exit('Error: duplicate pipelines in input file')
+    #endif
+
+    num_pipelines = len(fileTrace.arr_pipelines)
+    num_cores = len(fileTrace.in_physical_cores)
+
+    #debug
+    #print(fileTrace.matches_pipeline)
+    print("-------------------Pipeline-to-core mapping--------------------")
+    print("Input pipelines = {}\nInput cores = {}" \
+        .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))
+
+    #input configuration file validations go here
+    if (num_cores > fileTrace.max_cores):
+        sys.exit('Error: number of cores specified > max_cores (%d)'
+            %fileTrace.max_cores)
+
+    if (num_pipelines > fileTrace.max_pipelines):
+        sys.exit('Error: number of pipelines in input cfg file > max_pipelines (%d)'
+            %fileTrace.max_pipelines)
+
+    #call process to generate pipeline-to-core mapping, trace and log
+    process(num_cores, num_pipelines, fileTrace)
+
+#end function scanconfigfile
+
+
+#------------------------------------------------------------------------------
+# python trick - so that our Python files can act as either reusable modules,
+# or as standalone programs
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='mappipelines')
+
+    reqNamedGrp = parser.add_argument_group('required named args')
+    reqNamedGrp.add_argument(
+        '-i',
+        '--input-file',
+        type=argparse.FileType('r'),
+        help='Input config file',
+        required=True)
+
+    # --physical-cores "<core>,<core>,..." with <core> = s<SOCKETID>c<COREID>
+    reqNamedGrp.add_argument(
+        '-pc',
+        '--physical-cores',
+        type=validate_phycores,
+        help='''Enter available CPU cores in format:\"<core>,<core>,...\"
+            where each core format: \"s<SOCKETID>c<COREID>\"
+            where SOCKETID={0..9}, COREID={1-99}''',
+        required=True)
+
+    #add optional arguments
+    parser.add_argument(
+        '-ht',
+        '--hyper-thread',
+        help='enable/disable hyper threading. default is ON',
+        default='ON',
+        choices=['ON', 'OFF'])
+
+    parser.add_argument(
+        '-nO',
+        '--no-output-file',
+        help='disable output config file generation. Output file generation is enabled by default',
+        action="store_true")
+
+    args = parser.parse_args()
+
+    # create object of FileTrace and initialise
+    fileTrace = FileTrace(args.input_file.name)
+    fileTrace.in_physical_cores = args.physical_cores
+    fileTrace.hyper_thread = args.hyper_thread
+
+    if(fileTrace.hyper_thread == 'OFF'):
+        print("!!!!disabling stage2 HT!!!!")
+        enable_stage2_traceout = 0
+        enable_stage2_fileout = 0
+    #endif
+
+    if(args.no_output_file == True):
+        print("!!!!disabling stage1 and stage2 fileout!!!!")
+        enable_stage1_fileout = 0
+        enable_stage2_fileout = 0
+    #endif
+
+    scanconfigfile(fileTrace)
+
+#end main
--
2.5.5
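[Reviewer usage note, not part of the patch.] Based on the argparse
definitions above, an invocation would look something like
"./pipeline-to-core-mapping.py -i edge_router_downstream.cfg -pc "s0c1,s0c2"";
the exact core list is illustrative. Core identifiers are checked against
the script's pattern_phycore regular expression; a minimal sketch of that
check, reusing the same pattern:

    import re
    # Same pattern as pattern_phycore in the patch: s<SOCKETID>c<COREID>,
    # with SOCKETID a single digit and COREID starting from 1.
    reg_phycore = re.compile(r'^(s|S)\d(c|C)[1-9][0-9]*$')
    for core in ['s0c1', 'S1c16', 'c2', 's0c0']:
        print(core, '->', 'valid' if reg_phycore.match(core) else 'invalid')
    # 's0c1' and 'S1c16' match; 'c2' (missing socket) and 's0c0' (core 0) do not.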