@MastersThesis{Souza:2018:AvTéAv,
               author = "Souza, Carlos Renato de",
                title = "Avalia{\c{c}}{\~a}o de t{\'e}cnicas avan{\c{c}}adas de 
                         comunica{\c{c}}{\~a}o MPI na execu{\c{c}}{\~a}o do modelo 
                         BRAMS",
               school = "Instituto Nacional de Pesquisas Espaciais (INPE)",
                 year = "2018",
              address = "S{\~a}o Jos{\'e} dos Campos",
                month = "2018-05-28",
              keywords = "BRAMS, shared memory, modelo num{\'e}rico de previs{\~a}o, MPI, 
                          numerical forecast model.",
             abstract = "O Centro de Previs{\~a}o de Tempo e Estudos Clim{\'a}ticos 
                         (CPTEC) do Instituto Nacional de Pesquisas Espaciais (INPE) 
                         executa operacionalmente modelos num{\'e}ricos de previs{\~a}o 
                         de tempo e clima. Em particular, utiliza o modelo regional BRAMS 
                         (Brazilian developments on the Regional Atmospheric Modeling 
                         System). Esse modelo foi paralelizado com a biblioteca de 
                         comunica{\c{c}}{\~a}o por troca de mensagens Message Passing 
                         Interface (MPI) e {\'e} executado no supercomputador Tup{\~a} do 
                         CPTEC, o qual {\'e} composto de mais de um milhar de n{\'o}s de 
                         processamento. Cada n{\'o} possui dois processadores 
                         multin{\'u}cleos numa arquitetura de mem{\'o}ria compartilhada. 
                         Na execu{\c{c}}{\~a}o paralela do modelo BRAMS, seu 
                         dom{\'{\i}}nio de c{\'a}lculo {\'e} dividido em 
                         subdom{\'{\i}}nios entre processos executados nos n{\'u}cleos 
                         dos muitos n{\'o}s. Eventuais depend{\^e}ncias de dados entre 
                         subdom{\'{\i}}nios diferentes implicam na troca de mensagens MPI 
                          entre processos, sejam eles do mesmo n{\'o} ou n{\~a}o. O BRAMS 
                         utiliza troca de mensagens MPI bilateral no modo 
                         ass{\'{\i}}ncrono e sem bloqueio, dispon{\'{\i}}vel desde a 
                         primeira vers{\~a}o do padr{\~a}o MPI. O padr{\~a}o MPI tem 
                         evolu{\'{\i}}do, oferecendo novas t{\'e}cnicas para otimizar a 
                         comunica{\c{c}}{\~a}o entre processos. Assim, visando otimizar o 
                         desempenho da comunica{\c{c}}{\~a}o, o padr{\~a}o MPI-2 
                         introduziu a chamada comunica{\c{c}}{\~a}o unilateral por acesso 
                          remoto {\`a} mem{\'o}ria, que permite a um processo fazer 
                         leituras ou escritas por meio de fun{\c{c}}{\~o}es MPI na 
                         mem{\'o}ria de outro, seja do mesmo n{\'o} ou n{\~a}o, 
                         permitindo a troca de dados entre processos, sem que o processo 
                         alvo participe da comunica{\c{c}}{\~a}o explicitamente. A 
                         comunica{\c{c}}{\~a}o unilateral foi aperfei{\c{c}}oada no 
                          MPI-3, e uma nova funcionalidade foi introduzida, a 
                         comunica{\c{c}}{\~a}o unilateral por mem{\'o}ria compartilhada, 
                         que permite a processos MPI executados num mesmo n{\'o} definirem 
                          uma janela comum de mem{\'o}ria local e efetuarem leituras e 
                         escritas diretas na {\'a}rea da janela de outros processos 
                         locais. Este trabalho visa avaliar o desempenho da 
                         execu{\c{c}}{\~a}o paralela do modelo regional BRAMS ao utilizar 
                          a comunica{\c{c}}{\~a}o unilateral por mem{\'o}ria compartilhada 
                          na comunica{\c{c}}{\~a}o intra-n{\'o}, mantendo a 
                         comunica{\c{c}}{\~a}o bilateral ass{\'{\i}}ncrona e sem 
                         bloqueio na comunica{\c{c}}{\~a}o inter-n{\'o} e preservando a 
                         mesma divis{\~a}o de dom{\'{\i}}nio de sua vers{\~a}o paralela 
                         original. ABSTRACT: The Center for Weather Forecasts and Climate 
                         Studies (CPTEC) of the Brazilian National Institute for Space 
                         Research (INPE) executes several climate and weather numerical 
                         forecast models on an operational basis, specifically using the 
                          regional model named BRAMS (Brazilian developments on the 
                         Regional Atmospheric Modeling System). This model was parallelized 
                         using the Message Passing Interface (MPI) communication library, 
                          and is executed on CPTEC's {"}Tup{\~a}{"}, a supercomputer 
                          composed of more than a thousand processing nodes. Each node has two 
                         multi-core processors in a shared memory architecture. In the 
                          parallel execution of BRAMS, its calculation domain is divided 
                         among processes executed in the cores of the many nodes. Data 
                         dependencies between different subdomains require the exchange of 
                         MPI messages between the processes, either intra-node or 
                         inter-node. BRAMS employs asynchronous non-blocking point-to-point 
                         communication, available since the first version of the MPI 
                          standard. The MPI standard has evolved through the years, bringing 
                          new techniques to optimize the communication between processes. 
                          Thus, MPI-2 introduced one-sided communication by remote memory 
                          access, which allows a process to perform reads or writes, through 
                          MPI functions, to the memory of another process, either in the same 
                          node or not, exchanging data between processes without the explicit 
                         cooperation of the target process. One-sided communication was 
                          improved in the MPI-3 standard, and a new technique was added: the 
                         shared memory one-sided communication. MPI processes executed in 
                         the same computational node may define a common shared memory 
                          window and perform direct reads and writes in the window segment of 
                         another process. The purpose of this work is to evaluate the 
                         parallel performance of the BRAMS model using the shared memory 
                         one-sided communication for the intra-node communication while 
                          keeping the asynchronous non-blocking point-to-point inter-node 
                         communication, and preserving the domain decomposition of its 
                         original parallel version.",
            committee = "Mendes, Celso Luiz (presidente) and Stephany, Stephan (orientador) 
                         and Panetta, Jairo (orientador) and Campos Velho, Haroldo Fraga de 
                         and Fazenda, {\'A}lvaro Luiz",
         englishtitle = "Evaluation of advanced techniques for MPI communication in the 
                         execution of the BRAMS model",
             language = "pt",
                pages = "108",
                  ibi = "8JMKD3MGP3W34R/3R6QKR8",
                  url = "http://urlib.net/ibi/8JMKD3MGP3W34R/3R6QKR8",
           targetfile = "publicacao.pdf",
        urlaccessdate = "27 abr. 2024"
}
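The abstract describes three MPI communication schemes. To illustrate the first one, the two-sided, asynchronous, non-blocking exchange that BRAMS uses between neighbouring subdomains, here is a minimal C sketch of a halo exchange. It is not code from the thesis or from BRAMS: the 1-D decomposition, the single ghost cell per side and the message tags are assumptions made only for this example.

/* Illustrative halo exchange with two-sided, asynchronous, non-blocking MPI.
 * The 1-D layout, buffer sizes and tags are assumptions for the sketch. */
#include <mpi.h>
#include <stdio.h>

#define N 4  /* interior points per process (illustrative) */

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    double u[N + 2];                          /* one ghost cell at each end */
    for (int i = 0; i < N + 2; i++) u[i] = rank;

    int left  = (rank == 0)        ? MPI_PROC_NULL : rank - 1;
    int right = (rank == size - 1) ? MPI_PROC_NULL : rank + 1;

    MPI_Request req[4];
    /* post receives into the ghost cells and sends of the boundary cells */
    MPI_Irecv(&u[0],     1, MPI_DOUBLE, left,  0, MPI_COMM_WORLD, &req[0]);
    MPI_Irecv(&u[N + 1], 1, MPI_DOUBLE, right, 1, MPI_COMM_WORLD, &req[1]);
    MPI_Isend(&u[1],     1, MPI_DOUBLE, left,  1, MPI_COMM_WORLD, &req[2]);
    MPI_Isend(&u[N],     1, MPI_DOUBLE, right, 0, MPI_COMM_WORLD, &req[3]);

    /* computation on interior points could overlap the communication here */

    MPI_Waitall(4, req, MPI_STATUSES_IGNORE);
    printf("rank %d ghosts: left=%g right=%g\n", rank, u[0], u[N + 1]);

    MPI_Finalize();
    return 0;
}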

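The MPI-2 one-sided (remote memory access) communication mentioned in the abstract exposes part of a process's memory as a window that other processes can read or write through MPI calls, without the target posting a receive. A minimal sketch, assuming a one-integer window per process and fence synchronization; it is not taken from the thesis.

/* Illustrative MPI-2 one-sided communication: each process exposes one int
 * as a window and writes its rank into the next process's window with
 * MPI_Put; the target never posts a receive. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int recvbuf = -1;                       /* memory exposed for remote access */
    MPI_Win win;
    MPI_Win_create(&recvbuf, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    int target = (rank + 1) % size;
    int value  = rank;

    MPI_Win_fence(0, win);                  /* open the access epoch */
    MPI_Put(&value, 1, MPI_INT, target, 0, 1, MPI_INT, win);
    MPI_Win_fence(0, win);                  /* complete all puts */

    printf("rank %d received %d via MPI_Put\n", rank, recvbuf);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}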

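The MPI-3 shared-memory one-sided communication, the technique the thesis evaluates for intra-node traffic, lets the processes of one node allocate a common window and access each other's segments with ordinary loads and stores. A minimal sketch, assuming a node-local communicator from MPI_Comm_split_type, one integer per process and MPI_Win_fence synchronization; the actual integration into BRAMS is described in the thesis itself.

/* Illustrative MPI-3 shared-memory window: processes of the same node
 * allocate a shared window and read a neighbour's segment directly. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    /* communicator containing only the processes of this node */
    MPI_Comm node;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &node);
    int nrank, nsize;
    MPI_Comm_rank(node, &nrank);
    MPI_Comm_size(node, &nsize);

    /* each process contributes one int to the node-wide shared window */
    MPI_Win win;
    int *mine;
    MPI_Win_allocate_shared(sizeof(int), sizeof(int), MPI_INFO_NULL,
                            node, &mine, &win);
    *mine = nrank * 100;

    MPI_Win_fence(0, win);                  /* make local stores visible node-wide */

    /* direct load from the neighbour's segment: no message is sent */
    int peer = (nrank + 1) % nsize;
    MPI_Aint seg_size;
    int disp_unit;
    int *theirs;
    MPI_Win_shared_query(win, peer, &seg_size, &disp_unit, &theirs);
    printf("node rank %d reads %d directly from rank %d\n",
           nrank, *theirs, peer);

    MPI_Win_fence(0, win);
    MPI_Win_free(&win);
    MPI_Comm_free(&node);
    MPI_Finalize();
    return 0;
}

Because the final read touches the neighbour's segment directly, no message is transferred for intra-node exchanges; the impact of this on BRAMS is what the thesis evaluates.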