@MASTERSTHESIS{2021:978431074,
  title     = {Self-attention for improving the differentiable rendering pipeline in image 3D reconstruction},
  year      = {2021},
  url       = {http://tede2.pucrs.br/tede2/handle/tede/10117},
  abstract  = {Recent studies on Differentiable Rendering models for 3D reconstruction focus on fully convolutional models for data feature extraction or for the decoding process. Meanwhile, computer vision tasks such as image recognition, segmentation, image generation, and object detection are benefiting greatly from fully self-attention-based approaches known as Transformers. Motivated by the recent success of Transformer backbone models in computer vision, in this work we explore four self-attention-based approaches for implicit 3D object reconstruction from images. In our first approach, we combine SAGAN self-attention layers with convolutional layers; in our second approach, we implement a patchwise self-attention model that completely replaces the convolutional encoder; next, we implement a Transformer model, the Pyramid Vision Transformer, to replace the convolution-based encoder of the DVR model; finally, we implement the Nyströmformer model, an efficient self-attention approximation that reduces the computational cost and improves the feature-extraction capability. Across all approaches, our results show that Transformer models achieve competitive results and that the attention approximation reduces the computational cost. By applying this optimization and reducing the computational cost, it was possible to modify the decoder module to improve reconstruction quality, yielding improvements of up to 8.5% over the baseline approaches.},
  publisher = {Pontifícia Universidade Católica do Rio Grande do Sul},
  school    = {Programa de Pós-Graduação em Ciência da Computação},
  note      = {Escola Politécnica}
}