author = "Lima Francisco, Phyllipe de Souza and Guerra, Eduardo Martins and 
                         Meirelles, Paulo and Kanashiro, Lucas and Silva, H{\'e}lio and 
                         Silveira, F{\'a}bio Fagundes",
          affiliation = "{Instituto Nacional de Pesquisas Espaciais (INPE)} and {Instituto 
                         Nacional de Pesquisas Espaciais (INPE)} and {Universidade de 
                         S{\~a}o Paulo (USP)} and {Universidade de S{\~a}o Paulo (USP)} 
                         and {Instituto Nacional de Pesquisas Espaciais (INPE)} and 
                         {Universidade Federal de S{\~a}o Paulo (UNIFESP)}",
                title = "A metrics suite for code annotation assessment",
              journal = "Journal of Systems and Software",
                 year = "2018",
               volume = "137",
                pages = "163--183",
                 month = mar,
             keywords = "Code annotation, Software metrics, Thresholds.",
             abstract = "Code annotation is a language feature that enables the 
                         introduction of custom metadata on programming elements. In Java, 
                         this feature was introduced on version 5, and today it is widely 
                         used by main enterprise application frameworks and APIs. Although 
                         this language feature potentially simplifies metadata 
                         configuration, its abuse and misuse can reduce source code 
                         readability and complicate its maintenance. The goal of this paper 
                         is to propose software metrics regarding annotations in the source 
                         code and analyze their distribution in real-world projects. We 
                         have defined a suite of metrics to assess characteristics of the 
                         usage of source code annotations in a code base. Our study 
                         collected data from 24947 classes extracted from open source 
                         projects to analyze the distribution of the proposed metrics. We 
                         developed a tool to automatically extract the metrics and provide 
                         a full report on annotations usage. Based on the analysis of the 
                         distribution, we defined an appropriate approach for the 
                         calculation of thresholds to interpret the metric values. The 
                         results allow the assessment of annotated code characteristics. 
                         Using the thresholds values, we proposed a way to interpret the 
                         use of annotations, which can reveal potential problems in the 
                         source code.",
                  doi = "10.1016/j.jss.2017.11.024",
                   url = "https://doi.org/10.1016/j.jss.2017.11.024",
                 issn = "0164-1212",
             language = "en",
           targetfile = "lima_metrics.pdf",
               urldate = "2020-11-26"