%%% ==================================================================== %%% CRS4 Visual Computing Group Bibliography %%% ==================================================================== @String{inst-CRS4 = "CRS4, Center for Advanced Studies, Research, and Development in Sardinia"} @String{inst-CRS4:adr = "Cagliari, Italy"} @String{inst-EPFL = "Swiss Federal Institute of Technology"} @String{inst-EPFL:adr = "Lausanne, Switzerland"} %======================================================================= % Journal abbreviations: @String{j-CG-FORUM = "Computer Graphics Forum"} @String{j-COMPUTER = "Computer"} %======================================================================= % Publishers and their addresses: @String{pub-ACM = "ACM Press"} @String{pub-ACM:adr = "New York, NY, USA"} @String{pub-AP-PROFESSIONAL = "AP Professional"} @String{pub-AP-PROFESSIONAL:adr = "Boston, MA, USA"} @String{pub-AW = "Ad{\-d}i{\-s}on-Wes{\-l}ey"} @String{pub-AW:adr = "Boston, MA, USA"} @String{pub-BLACKWELL = "Blackwell Publishers"} @String{pub-BLACKWELL:adr = "Oxford, England"} @String{pub-ELS = "Elsevier Science Publishers B. V."} @String{pub-ELS:adr = "Amsterdam, The Netherlands"} @String{pub-EUROGRAPHICS = "Eurographics Association"} @String{pub-EUROGRAPHICS:adr = "Aire-la-Ville, Switzerland"} @String{pub-IEEE = "IEEE Computer Society Press"} @String{pub-IEEE:adr = "1109 Spring Street, Suite 300, Silver Spring, MD 20910, USA"} @String{pub-IOS = "IOS"} @String{pub-IOS:adr = "Amsterdam, The Netherlands"} @String{pub-PH = "Pren{\-}tice-Hall"} @String{pub-PH:adr = "Englewood Cliffs, NJ 07632, USA"} @String{pub-SV = "Springer Verlag"} @String{pub-SV:adr = "New York, NY, USA"} @String{pub-VIEWEG = "Vieweg Verlag"} @String{pub-VIEWEG:adr = "Wiesbaden, Germany"} @String{pub-WILEY = "Wiley"} @String{pub-WILEY:adr = "New York, NY, USA"} @String{pub-WILEY-COMPUTER = "Wiley Computer Publishers"} @String{pub-WILEY-COMPUTER:adr = "New York, NY, USA"} @String{pub-WORLD-SCI = "World Scientific Publishing Co."} @String{pub-WORLD-SCI:adr = "Singapore; Philadelphia, PA, USA; River Edge, NJ, USA"} %======================================================================= % Series abbreviations: @String{ser-LNCS = "Lecture Notes in Computer Science"} @String{ser-EG = "Eurographics Series"} %======================================================================= % Bibliography - Scripta %======================================================================= %################################ %### 2024 %################################ @InProceedings{Pintore:2024:A3M, idxkey = {}, idxproject = {AIN2,HPCCN,XDATA}, author = {Giovanni Pintore and Marco Agus and Enrico Gobbetti}, title = {Automatic {3D} modeling and exploration of indoor structures from panoramic imagery}, booktitle = {SIGGRAPH Asia 2024 Courses (SA Courses '24)}, month = {December}, year = {2024}, publisher = {ACM Press}, thumbnail = {https://www.crs4.it/vic/data/papers/sigasia2024-course-pano.jpg}, doi = {10.1145/3680532.3689580}, } @InProceedings{Dulecha:2024:DON, idxkey = {}, idxproject = {REFLEX, XDATA}, author = {Tinsae Dulecha and Leonardo Righetto and Ruggero Pintus and Enrico Gobbetti and Andrea Giachetti}, title = {{Disk-NeuralRTI}: Optimized {NeuralRTI} Relighting through Knowledge Distillation}, abstract = {Relightable images created from Multi-Light Image Collections (MLICs) are among the most employed models for interactive object exploration in cultural heritage (CH).
In recent years, neural representations have been shown to produce higher-quality images at similar storage costs to the more classic analytical models such as Polynomial Texture Maps (PTM) or Hemispherical Harmonics (HSH). However, the Neural RTI models proposed in the literature perform the image relighting with decoder networks with a high number of parameters, making decoding slower than for classical methods. Despite recent efforts targeting model reduction and multi-resolution adaptive rendering, exploring high-resolution images, especially on high-pixel-count displays, still requires significant resources and is only achievable through progressive rendering in typical setups. In this work, we show how, by using knowledge distillation from an original (teacher) Neural RTI network, it is possible to create a more efficient RTI decoder (student network). We evaluated the performance of the network compression approach on existing RTI relighting benchmarks, including both synthetic and real datasets, and on novel acquisitions of high-resolution images. Experimental results show that we can keep the student prediction close to the teacher with up to 80 percent parameter reduction and almost ten times faster rendering when embedded in an online viewer.}, booktitle = {STAG: Smart Tools and Applications in Graphics}, month = {November}, pages = {}, year = {2024}, doi = {10.2312/stag.20241340}, url = {https://www.crs4.it/vic/data/papers/stag2024-disk_neuralrti.pdf}, } @InProceedings{Pintore:2024:DDI, idxkey = {}, idxproject = {AIN2,HPCCN}, author = {Giovanni Pintore and Marco Agus and Alberto Signoroni and Enrico Gobbetti}, title = {{DDD}: Deep indoor panoramic Depth estimation with Density maps consistency}, abstract = {We introduce a novel deep neural network for rapid and structurally consistent monocular 360-degree depth estimation in indoor environments. The network infers a depth map from a single gravity-aligned or gravity-rectified equirectangular image of the environment, ensuring that the predicted depth aligns with the typical depth distribution and features of cluttered interior spaces, which are usually enclosed by walls, ceilings, and floors. By leveraging the distinct characteristics of vertical and horizontal features in man-made indoor environments, we introduce a lean network architecture that employs gravity-aligned feature flattening and specialized vision transformers that utilize the input’s omnidirectional nature, without segmentation into patches and positional encoding. To enhance the structural consistency of the predicted depth, we introduce a new loss function that evaluates the consistency of density maps by projecting points derived from the inferred depth map onto horizontal and vertical planes. 
This lightweight architecture has very small computational demands, provides greater structural consistency than competing methods, and does not require the explicit imposition of strong structural priors.}, booktitle = {STAG: Smart Tools and Applications in Graphics}, month = {November}, year = {2024}, doi = {10.2312/stag.20241336}, url = {https://www.crs4.it/vic/data/papers/stag2024-ddd.pdf}, } @InProceedings{Shah:2024:VVS, idxkey = {}, idxproject = {AIN2,HPCCN}, author = {Uzair Shah and Sara Jashari and Muhammad Tukur and Giovanni Pintore and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {{VISPI}: Virtual Staging Pipeline for Single Indoor Panoramic Images}, abstract = {Taking a 360-degree image is the quickest and most cost-effective way to capture the entire environment around the viewer in a form that can be directly exploited for creating immersive content. In this work, we introduce novel solutions for the virtual staging of indoor environments, supporting automatic emptying, object insertion, and relighting. Our solution, dubbed VISPI (Virtual Staging Pipeline for Single Indoor Panoramic Images), integrates data-driven processing components that take advantage of knowledge learned from massive data collections within a real-time rendering and editing system, allowing for interactive restaging of indoor scenes. Key components of VISPI include: i) a holistic architecture based on a multi-task vision transformer for extracting geometric, semantic, and material information from a single panoramic image, ii) a lighting model based on spherical Gaussians, iii) a method for lighting estimation from the geometric, semantic, and material signals, and iv) a real-time editing and rendering component. The proposed framework provides an interactive and user-friendly solution for creating immersive visualizations of indoor spaces. We present a preliminary assessment of VISPI using a synthetic dataset -- Structured3D -- and demonstrate its application in creating restaged indoor scenes.}, booktitle = {STAG: Smart Tools and Applications in Graphics}, month = {November}, year = {2024}, doi = {10.2312/stag.20241334}, url = {https://www.crs4.it/vic/data/papers/stag2024-vispi.pdf}, } @InProceedings{Jashari:2024:EAS, idxkey = {}, idxproject = {AIN2,HPCCN}, author = {Sara Jashari and Muhammad Tukur and Yehia Boraey and Uzair Shah and Mahmood Alzubaidi and Giovanni Pintore and Enrico Gobbetti and Alberto Jaspe-Villanueva and Jens Schneider and Noora Fetais and Marco Agus}, title = {Evaluating {AI-based} static stereoscopic rendering of indoor panoramic scenes}, abstract = {Panoramic imaging has recently become an extensively used technology for the representation and exploration of indoor environments. Panoramic cameras generate omnidirectional images that provide a comprehensive 360-degree view, making them a valuable tool for applications such as virtual tours in real estate, architecture, and cultural heritage. However, constructing truly immersive experiences from panoramic images presents challenges, particularly in generating panoramic stereo pairs that offer consistent depth cues and visual comfort across all viewing directions. Traditional stereo-imaging techniques do not directly apply to spherical panoramic images, requiring complex processing to avoid artifacts that can disrupt immersion.
To address these challenges, various imaging and processing technologies have been developed, including multi-camera systems and computational methods that generate stereo images from a single panoramic input. Although effective, these solutions often involve complicated hardware and processing pipelines. Recently, deep learning approaches have emerged, enabling novel view generation from single panoramic images. While these methods show promise, they have not yet been thoroughly evaluated in practical scenarios. This paper presents a series of evaluation experiments aimed at assessing different technologies for creating static stereoscopic environments from omnidirectional imagery, with a focus on 3DOF immersive exploration. A user study was conducted using a WebXR prototype and a Meta Quest 3 headset to quantitatively and qualitatively compare traditional image composition techniques with AI-based methods. Our results indicate that while traditional methods provide a satisfactory level of immersion, AI-based generation is nearing a quality level suitable for deployment in web-based environments.}, booktitle = {STAG: Smart Tools and Applications in Graphics}, month = {November}, year = {2024}, doi = {10.2312/stag.20241333}, url = {https://www.crs4.it/vic/data/papers/stag2024-panostereoeval.pdf}, } @InProceedings{Tukur:2024:VST, idxkey = {}, idxproject = {AIN2}, author = {Muhammad Tukur and Yehia Boraey and Sara Jashari and Alberto Jaspe-Villanueva and Uzair Shah and Mahmood Alzubaidi and Giovanni Pintore and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {Virtual Staging Technologies for the Metaverse}, abstract = {This paper presents an exploration of virtual staging technologies within the metaverse, emphasizing two primary pipelines for creating and exploring immersive indoor environments: an AI-based image processing pipeline and a LIDAR-based pipeline. The AI-based image processing pipeline leverages advanced AI algorithms for tasks such as clutter removal, semantic style transfer, and super-resolution, enabling rapid generation of high-quality, photorealistic virtual environments from single panoramic images. The LIDAR-based pipeline captures accurate 3D models of indoor spaces, facilitating immersive editing and collaborative design through real-time interaction with high-fidelity virtual environments. A qualitative comparative analysis of these technologies highlights their strengths and limitations in various applications. The practical implications of these pipelines are discussed, particularly their potential to transform industries such as real estate, furniture retail, interior design, construction, remote collaboration, and immersive training. The paper concludes with suggestions for future research, including conducting user studies, integrating the two pipelines, and optimizing technologies for mobile and edge devices to enhance accessibility and usability.}, booktitle = {Proc. 
IEEE iMeta}, pages = {206--213}, year = {2024}, doi = {10.1109/iMETA62882.2024.10807986}, url = {https://www.crs4.it/vic/data/papers/imeta2024-vstm.pdf}, } @article{Righetto:2024:EUV, idxkey = {TOP-THEME-ACQUISITION}, idxproject = {REFLEX}, author = {Leonardo Righetto and Mohammad Khademizadeh and Andrea Giachetti and Federico Ponchio and Davit Gigilashvili and Fabio Bettio and Enrico Gobbetti}, title = {Efficient and user-friendly visualization of neural relightable images for cultural heritage applications}, journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)}, volume = {17}, number = {4}, pages = {54:1--54:24}, year = 2024, doi = {10.1145/3690390}, abstract = { We introduce an innovative multiresolution framework for encoding and interactively visualizing large relightable images using a neural reflectance model derived from a state-of-the-art technique. It is seamlessly integrated into a scalable multi-platform framework that supports adaptive streaming and exploration of multi-layered relightable models in web settings. To enhance efficiency, we optimized the neural model, simplified decoding, and implemented a custom WebGL shader specific to the task, eliminating the need for deep-learning library integration in the code. Additionally, we introduce an efficient level-of-detail management system supporting fine-grained adaptive rendering through on-the-fly resampling in latent feature space. The resulting viewer facilitates interactive neural relighting of large images. Its modular design allows the incorporation of functionalities for Cultural Heritage analysis, such as loading and simultaneous visualization of multiple relightable layers with arbitrary rotations. }, thumbnail = {https://www.crs4.it/vic/data/papers/jocch2024-neurolime.jpg}, url = {https://dl.acm.org/doi/pdf/10.1145/3690390}, } @inproceedings{Pintus:2024:ABM, idxkey = {}, idxproject = {XDATA,REFLEX}, author = {Ruggero Pintus and Antonio Zorcolo and Enrico Gobbetti}, title = {Applying {BRDF} Monotonicity for Refined Shading Normal Extraction from Multi-Light Image Collections}, booktitle = {The 22nd Eurographics Workshop on Graphics and Cultural Heritage}, pages = {1--6}, year = 2024, doi={10.2312/gch.20241241}, abstract = { Multi-Light Image Collections (MLICs) are often transformed into geometric normals and BRDF normals for visual exploration under novel illumination. However, discrepancies between the chosen BRDF space and the complete optical behavior of objects, along with the possible presence of non-local lighting effects in measurements, often lead to sub-optimal visual outcomes even with the most accurate geometric normal recovery. In this paper, we introduce a modular component designed to convert the geometric normals into well-behaved shading normals, under the common and general assumption that the reflectance must be a monotonic function of the angle between the shading normal and the bisector of lighting and viewing directions. Since it does not require the coupling of shape and material estimation, the module allows seamless integration into existing reconstruction pipelines, supporting the mixing and matching of Photometric Stereo methods, BRDF models, and BRDF fitters. The performance and versatility of the approach are demonstrated through experiments.
}, url = {https://www.crs4.it/vic/data/papers/gch2024-monotonic.pdf}, } @proceedings{Magnenat:2024:CGI, editor = {Nadia {Magnenat-Thalmann} and Jinman Kim and Bin Sheng and Jian Zhang and Zhigang Deng and Enrico Gobbetti and Ping Li and Daniel Thalmann}, title = {The Visual Computer - Special issue on CGI 2024}, volume = {40}, publisher = {Springer}, month = jul, year = {2024}, isbn = {}, thumbnail = {https://www.crs4.it/vic/data/papers/tvc2024-cgiissue.jpg}, url = {https://link.springer.com/collections/hbjdbjchce}, } @inproceedings{Shah:2024:EEE, idxkey = {}, idxproject = {AIN2,HPCCN}, author = {Uzair Shah and Jens Schneider and Giovanni Pintore and Enrico Gobbetti and Mahmood Alzubaidi and Mowafa Househ and Marco Agus}, title = {{EleViT}: exploiting element-wise products for designing efficient and lightweight vision transformers}, booktitle = {Proc. T4V - IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)}, pages = {}, year = 2024, doi={}, abstract = { We introduce EleViT, a novel vision transformer optimized for image processing tasks. Aligning with the trend towards sustainable computing, EleViT addresses the need for lightweight and fast models without compromising performance by redefining the multihead attention mechanism to primarily use element-wise products instead of traditional matrix multiplication. This modification preserves attention capabilities, while enabling multiple multihead attention blocks within a convolutional projection framework, resulting in a model with fewer parameters and improved efficiency in training and inference, especially for moderately complex datasets. Benchmarks against state-of-the-art vision transformers showcase competitive performance on low-data regime datasets like CIFAR-10, CIFAR-100, and Tiny-ImageNet-200. }, url = {https://www.crs4.it/vic/data/papers/cvprt4v2024-elevit.pdf}, } @inproceedings{Shah:2024:PSG, idxkey = {}, idxproject = {AIN2,HPCCN}, author = {Uzair Shah and Muhammad Tukur and Mahmood Alzubaidi and Giovanni Pintore and Enrico Gobbetti and Mowafa Househ and Jens Schneider and Marco Agus}, title = {{MultiPanoWise}: holistic deep architecture for multi-task dense prediction from a single panoramic image}, booktitle = {Proc. OmniCV - IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)}, pages = {1311--1321}, year = 2024, doi={10.1109/CVPRW63382.2024.00138}, abstract = { We present a novel holistic deep-learning approach for multi-task learning from a single indoor panoramic image. Our framework, named MultiPanoWise, extends vision transformers to jointly infer multiple pixel-wise signals, such as depth, normals, and semantic segmentation, as well as signals from intrinsic decomposition, such as reflectance and shading. Our solution leverages a specific architecture combining a transformer-based encoder-decoder with multiple heads, introducing, in particular, a novel context adjustment approach to enforce knowledge distillation between the various signals. Moreover, at training time we introduce a hybrid loss scalarization method based on an augmented Chebyshev/hypervolume scheme. We demonstrate the capabilities of the proposed architecture on public-domain synthetic and real-world datasets. We showcase performance improvements with respect to the most recent methods specifically designed for single tasks, like, for example, individual depth estimation or semantic segmentation.
To the best of our knowledge, this is the first architecture able to achieve state-of-the-art performance on the joint extraction of heterogeneous signals from single indoor omnidirectional images. }, url = {https://www.crs4.it/vic/data/papers/omnicv2024-multipanowise.pdf}, } @Article{Pintore:2024:DSE, idxkey = {TOP-THEME-INDOOR, TOP-THEME-MOBILE}, idxproject = {XDATA,AIN2,HPCCN}, author = {Giovanni Pintore and Alberto Jaspe-Villanueva and Markus Hadwiger and Jens Schneider and Marco Agus and Fabio Marton and Fabio Bettio and Enrico Gobbetti}, title = {Deep synthesis and exploration of omnidirectional stereoscopic environments from a single surround-view panoramic image}, journal = {Computers \& Graphics}, year = 2024, volume = {119}, number = {}, month = mar, pages = {103907}, issn = {}, doi = {10.1016/j.cag.2024.103907}, abstract = { We introduce an innovative approach to automatically generate and explore immersive stereoscopic indoor environments derived from a single monoscopic panoramic image in an equirectangular format. Once per 360$^\circ$ shot, we estimate the per-pixel depth using a gated deep network architecture. Subsequently, we synthesize a collection of panoramic slices through reprojection and view-synthesis employing deep learning. These slices are distributed around the central viewpoint, with each slice's projection center placed on the circular path covered by the eyes during a head rotation. Furthermore, each slice encompasses an angular extent sufficient to accommodate the potential gaze directions of both the left and right eye and to provide context for reconstruction. For fast display, a stereoscopic multiple-center-of-projection stereo pair in equirectangular format is composed by suitably blending the precomputed slices. At run-time, the pair is loaded in a lightweight WebXR viewer that responds to head rotations, offering both motion and stereo cues. The approach combines and extends state-of-the-art data-driven techniques, incorporating several innovations. Notably, a gated architecture is introduced for panoramic monocular depth estimation. Leveraging the predicted depth, the same gated architecture is then applied to the re-projection of visible pixels, facilitating the inpainting of occluded and disoccluded regions by incorporating a mixed Generative Adversarial Network. The resulting system works on a variety of available VR headsets and can serve as a base component for immersive applications. We demonstrate our technology on several indoor scenes from publicly available data. }, url = {https://www.crs4.it/vic/data/papers/cag2024-panostereo.pdf}, } @Article{Massidda:2024:PDP, idxkey = {}, idxproject = {ENERGIDRICA,HPCCN}, author = {Luca Massidda and Fabio Bettio and Marino Marrocu}, title = {Probabilistic day-ahead prediction of {PV} generation. A comparative analysis of forecasting methodologies and of the factors influencing accuracy}, journal = {Solar Energy}, volume = {271}, number = {}, month = mar, pages = {112422}, year = 2024, doi={10.1016/j.solener.2024.112422}, abstract = { Photovoltaic (PV) power forecasting is essential for the integration of renewable energy sources into the grid and for the optimisation of energy management systems. In this paper, we address the problem of probabilistic day-ahead forecasting of PV power generation for an operating plant with imperfect measurements and incomplete information.
We compare five probabilistic forecasting methodologies: one physical irradiance-to-power method based on a model of the power plant and on weather forecasts, and four statistical methods based on quantile regression and classification techniques. We evaluate the performance of these methods in terms of deterministic and probabilistic accuracy, as well as the influence of the forecast horizon and the autoregressive component. The results show that statistical methods outperform the physical method, that conformalized quantile regression achieves the highest probabilistic accuracy, and that weather forecasts are more important than autoregressive predictors for the forecast procedure. To our knowledge, this is one of the first studies to compare different probabilistic forecasting approaches on the same case, and it provides information on the relative importance of the factors affecting the accuracy of the forecast. }, thumbnail = {https://www.crs4.it/vic/data/papers/se2024-probpvpredict.jpg}, } @Article{Pintore:2024:DPD, idxkey = {TOP-THEME-INDOOR}, idxproject = {EVOCATION,XDATA}, author = {Giovanni Pintore and Eva Almansa and Armando Sanchez and Giorgio Vassena and Enrico Gobbetti}, title = {Deep Panoramic Depth Prediction and Completion for Indoor Scenes}, journal = {Computational Visual Media}, volume = {}, number = {}, month = feb, pages = {}, year = 2024, doi={10.1007/s41095-023-0358-0}, abstract = { We introduce a novel end-to-end deep-learning solution for rapidly estimating a dense spherical depth map of an indoor environment. Our input is a single equirectangular image registered with a sparse depth map, as provided by a variety of common capture setups. Depth is inferred by an efficient and lightweight single-branch network, which employs a dynamic gating system to process together dense visual data and sparse geometric data. We exploit the characteristics of typical man-made environments to efficiently compress multi-resolution features and find short- and long-range relations among scene parts. Furthermore, we introduce a new augmentation strategy to make the model robust to different types of sparsity, including those generated by various structured light sensors and LiDAR setups. The experimental results demonstrate that our method provides interactive performance and outperforms state-of-the-art solutions in computational efficiency, adaptivity to variable depth sparsity patterns, and prediction accuracy for challenging indoor data, even when trained solely on synthetic data without any fine tuning. }, url = {https://link.springer.com/content/pdf/10.1007/s41095-023-0358-0.pdf}, thumbnail = {https://www.crs4.it/vic/data/papers/cvm2024-sparse2dense.jpg}, } @PhdThesis{Almansa:2024:DD3, idxproject = {EVOCATION}, author = {Eva Almansa}, title = {Data-driven depth and 3D architectural layout estimation of an interior environment from monocular panoramic input}, school = {PhD Programme in Mathematics and Computer Science, University of Cagliari, Italy}, year = 2024, abstract= { In recent years, there has been significant research interest in the automatic 3D reconstruction and modeling of indoor scenes from capture data, giving rise to an emerging sub-field within 3D reconstruction. The primary goal is to convert an input source, which represents a sample of a real-world indoor environment, into a model that may encompass geometric, structural, and/or visual abstractions.
Within the scope of this thesis, the focus has been on the extraction of geometric information from a single panoramic image, either by using only visual data or aided by very sparse registered depth information. This particular setup has attracted a lot of interest in recent years, since 360$^\circ$ images offer rapid and comprehensive single-image coverage and they are supported by a wide range of professional and consumer capture devices, which makes the data acquisition process both efficient and cost-effective. On the other hand, despite the 360-degree coverage, inferring a comprehensive model from mostly visual input in the presence of noise, missing data, and clutter remains very challenging. Thus, my research has focused on finding clever ways to exploit prior information, in the form of architectural priors and data-driven priors derived from large sets of examples, to design end-to-end deep learning solutions to solve well-defined fundamental tasks in the structured reconstruction pipeline. The tasks on which I have focused are, in particular, depth estimation from a single 360-degree image, depth completion from a single 360-degree image enriched with sparse depth measurements, and 3D architectural layout estimation from a single 360-degree image. While the first two problems produce pixel-wise output in the form of a dense depth map, the latter consists in the reconstruction, from the image of the furnished room, of a simplified model of the 3D shape of the bounding permanent surfaces of a room. As a first contribution towards reconstructing indoor information from purely visual data, I introduced a novel deep neural network to estimate a depth map from a single monocular indoor panorama. The network directly works on the equirectangular projection, exploiting the properties of indoor 360-degree images. Starting from the fact that gravity plays an important role in the design and construction of man-made indoor scenes, the network compactly encodes the scene into vertical spherical slices, and exploits long- and short-term relationships among slices to recover an equirectangular depth map directly from an equirectangular RGB image. My second contribution expands this approach to the common situation in which we receive as input a single equirectangular image registered with a sparse depth map, as provided by a variety of common capture setups. In this approach, depth is inferred by an efficient and lightweight single-branch network, which employs a dynamic gating system to process together dense visual data and sparse geometric data. Furthermore, a new augmentation strategy makes the model robust to different types of sparsity, including those generated by various structured light sensors and LiDAR setups. While the two preceding contributions focus on the estimation of per-pixel geometric information, my third contribution has tackled the problem of recovering the 3D shape of the bounding permanent surfaces of a room from a single panoramic image. The method also exploits gravity-aligned features, but within a significantly different setup, dictated by the fact that we not only need to separate walls, ceilings, and floors, but also to recover the plausible shape of invisible areas. The proposed approach, differently from prior state-of-the-art methods, fully addresses the problem in 3D, significantly expanding the reconstruction space.
In particular, a graph convolutional network directly infers the room structure as a 3D mesh by progressively deforming a graph-encoded tessellated sphere mapped to the spherical panorama, leveraging perceptual features extracted from the input image. Gravity-aligned features are actively incorporated in the graph in a projection layer that exploits the recent concept of multi-head self-attention, and specialized losses guide towards plausible solutions even in the presence of massive clutter and occlusions. The benchmarks on publicly available data show that all three methods are on par with or better than the state of the art. }, url = {https://www.crs4.it/vic/data/papers/2024-phd-almansa-indoor.pdf}, } %################################ %### 2023 %################################ @inproceedings{Tukur:2023:PSG, idxkey = {}, idxproject = {AIN2}, author = {Muhammad Tukur and Atiq {Ur Rehman} and Giovanni Pintore and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {{PanoStyle}: Semantic, Geometry-Aware and Shading Independent Photorealistic Style Transfer for Indoor Panoramic Scenes}, booktitle = {Proc. of the First Computer Vision Aided Architectural Design Workshop, International Conference on Computer Vision (ICCVW)}, month = oct, pages = {1553--1564}, year = 2023, doi={}, abstract = { While current style transfer models have achieved impressive results for the application of artistic style to generic images, they face challenges in achieving photorealistic performance on indoor scenes, especially the ones represented by panoramic images. Moreover, existing models overlook the unique characteristics of indoor panoramas, which possess particular geometry and semantic properties. To address these limitations, we propose the first geometry-aware and shading-independent, photorealistic and semantic style transfer method for indoor panoramic scenes. Our approach extends semantic-aware generative adversarial architecture capabilities by introducing two novel strategies to account for the geometric characteristics of indoor scenes and to enhance performance. Firstly, we incorporate strong geometry losses that use layout and depth inference at the training stage to enforce shape consistency between generated and ground truth scenes. Secondly, we apply a shading decomposition scheme to extract the albedo and normalized shading signal from the original scenes, and we apply the style transfer on albedo instead of full RGB images, thereby preventing shading-related bleeding issues. On top of that, we apply super-resolution to the resulting scenes to improve image quality and yield fine details. We evaluate our model's performance on public domain synthetic data sets. Our proposed architecture outperforms state-of-the-art style transfer models in terms of perceptual and accuracy metrics, achieving a 26.76\% lower ArtFID, a 6.95\% higher PSNR, and a 25.23\% higher SSIM. The visual results show that our method is effective in producing realistic and visually pleasing indoor scenes.
}, thumbnail = {https://www.crs4.it/vic/data/papers/cvaad2023-panostyle.jpg}, url = {https://openaccess.thecvf.com/content/ICCV2023W/CVAAD/html/Tukur_PanoStyle_Semantic_Geometry-Aware_and_Shading_Independent_Photorealistic_Style_Transfer_for_ICCVW_2023_paper.html}, } @inproceedings{Pintore:2023:PAG, idxkey = {}, idxproject = {AIN2}, author = {Giovanni Pintore and Alberto {Jaspe Villanueva} and Markus Hadwiger and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {PanoVerse: automatic generation of stereoscopic environments from single indoor panoramic images for Metaverse applications}, booktitle = {Proc. Web3D 2023 - 28th International ACM Conference on 3D Web Technology}, month = oct, pages = {}, year = 2023, doi={10.1145/3611314.3615914}, abstract = { We present a novel framework, dubbed \textbf{PanoVerse}, for the automatic creation and presentation of immersive stereoscopic environments from a single indoor panoramic image. Once per 360$^\circ$ shot, a novel data-driven architecture generates a fixed set of panoramic stereo pairs distributed around the current central viewpoint. Once per frame, directly on the HMD, we rapidly fuse the precomputed views to seamlessly cover the exploration workspace. To realize this system, we introduce several novel techniques that combine and extend state-of-the-art data-driven techniques. In particular, we present a gated architecture for panoramic monocular depth estimation and, starting from the re-projection of visible pixels based on predicted depth, we exploit the same gated architecture for inpainting the occluded and disoccluded areas, introducing a mixed GAN with self-supervised loss to evaluate the stereoscopic consistency of the generated images. At interactive rates, we interpolate precomputed panoramas to produce photorealistic stereoscopic views in a lightweight WebXR viewer. The system works on a variety of available VR headsets and can serve as a base component for Metaverse applications. We demonstrate our technology on several indoor scenes from publicly available data. }, url = {https://www.crs4.it/vic/data/papers/web3d2023-panoverse.pdf}, note = {Honorable mention award in the best paper category at Web3D 2023}, } @Article{Pintore:2023:DSS, idxkey = {TOP-THEME-INDOOR, TOP-THEME-MOBILE}, idxproject = {AIN2,XDATA}, author = {Giovanni Pintore and Fabio Bettio and Marco Agus and Enrico Gobbetti}, title = {Deep scene synthesis of Atlanta-world interiors from a single omnidirectional image}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {29}, number = {}, month = nov, pages = {}, year = 2023, doi={10.1109/TVCG.2023.3320219}, abstract = { We present a new data-driven approach for extracting geometric and structural information from a single spherical panorama of an interior scene, and for using this information to render the scene from novel points of view, enhancing 3D immersion in VR applications. The approach copes with the inherent ambiguities of single-image geometry estimation and novel view synthesis by focusing on the very common case of Atlanta-world interiors, bounded by horizontal floors and ceilings and vertical walls. Based on this prior, we introduce a novel end-to-end deep learning approach to jointly estimate the depth and the underlying room structure of the scene. The prior guides the design of the network and of novel domain-specific loss functions, shifting the major computational load onto a training phase that exploits available large-scale synthetic panoramic imagery.
An extremely lightweight network uses geometric and structural information to infer novel panoramic views from translated positions at interactive rates, from which perspective views matching head rotations are produced and upsampled to the display size. As a result, our method automatically produces new poses around the original camera at interactive rates, within a working area suitable for producing depth cues for VR applications, especially when using head-mounted displays connected to graphics servers. The extracted floor plan and 3D wall structure can also be used to support room exploration. The experimental results demonstrate that our method provides low-latency performance and improves over current state-of-the-art solutions in prediction accuracy on available commonly used indoor panoramic benchmarks.}, url = {https://www.crs4.it/vic/data/papers/ismar2023-panosynth.pdf}, note = {Proc. ISMAR} } @inproceedings{Righetto:2023:EIV, idxkey = {TOP-THEME-MOBILE}, idxproject = {XDATA}, author = {Leonardo Righetto and Fabio Bettio and Federico Ponchio and Andrea Giachetti and Enrico Gobbetti}, title = {Effective interactive visualization of neural relightable images in a web-based multi-layered framework}, booktitle = {The 21st Eurographics Workshop on Graphics and Cultural Heritage}, pages = {57--66}, year = 2023, doi={10.2312/gch.20231158}, abstract = { Relightable images created from Multi-Light Image Collections (MLICs) are one of the most commonly employed models for interactive object exploration in cultural heritage. In recent years, neural representations have been shown to produce higher-quality images, at similar storage costs, with respect to the more classic analytical models such as Polynomial Texture Maps (PTM) or Hemispherical Harmonics (HSH). However, their integration in practical interactive tools has so far been limited due to the higher evaluation cost, making it difficult to employ them for the interactive inspection of large images, and to the integration cost, due to the need to incorporate deep-learning libraries in relightable renderers. In this paper, we illustrate how a state-of-the-art neural reflectance model can be directly evaluated, using common WebGL shader features, inside a multi-platform renderer. We then show how this solution can be embedded in a scalable framework capable of handling multi-layered relightable models in web settings. We finally show the performance and capabilities of the method on cultural heritage objects.}, url = {https://www.crs4.it/vic/data/papers/gch2023-neurolime.pdf}, } @inproceedings{Righetto:2023:ACS, idxkey = {}, idxproject = {}, author = {Leonardo Righetto and Arianna Traviglia and Michela {De Bernardin} and Enrico Gobbetti and Federico Ponchio and Andrea Giachetti}, title = {Ancient coins' surface inspection with web-based neural {RTI} visualization}, booktitle = {Optics for Arts, Architecture, and Archaeology (O3A) IX}, publisher = {SPIE}, volume = {12620}, pages = {12620:0D}, year = 2023, doi={10.1117/12.2674888}, abstract = { The use of neural encodings has the potential to replace the commonly used polynomial fitting in the analysis of artwork surfaces based on Reflectance Transformation Imaging (RTI), as it has proved to result in more compact encodings with better relight quality, but it is still not widely used due to the lack of efficient implementations available to practitioners.
In this work, we describe an optimized system to encode/decode neural relightable images providing interactive visualization in a web interface allowing multi-layer visualization and annotation. To develop it, we performed several experiments testing different decoder architectures and input processing pipelines, evaluating the quality of the results on specific benchmarks to find the optimal tradeoff between relighting quality and efficiency. A specific decoder has then been implemented for the web and integrated into an advanced visualisation tool. The system has been tested for the analysis of a group of ancient Roman bronze coins that present scarce readability and varying levels of preservation and that have been acquired with a multispectral light dome. Their level of corrosion and degradation, which in some cases hinders the recognition of the images, numerals, or text represented on them, makes the system testing particularly challenging and complex. Testing on such a real-case scenario, however, enables us to determine the actual improvement that this new RTI visualization tool can offer to numismatists in their ability to identify the coins. }, thumbnail = {https://www.crs4.it/vic/data/papers/spie2023-coins.jpg}, } @article{Tukur:2023:SFP, idxkey = {TOP-THEME-INDOOR}, idxproject = {EVOCATION,XDATA}, author = {Muhammad Tukur and Giovanni Pintore and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {{SPIDER}: A framework for processing, editing and presenting immersive high-resolution spherical indoor scenes}, journal = {Graphical Models}, volume = 128, month = jul, pages = {101182:1--101182:11}, year = 2023, doi={10.1016/j.gmod.2023.101182}, abstract = { Today's Extended Reality (XR) applications that call for specific Diminished Reality (DR) strategies to hide specific classes of objects are increasingly using 360-degree cameras, which can capture entire areas in a single picture. In this work, we present an interactive image processing, editing, and rendering system named SPIDER that takes a spherical 360-degree indoor scene as input. The system is composed of a novel integrated deep learning architecture for extracting geometric and semantic information of full and empty rooms, based on gated and dilated convolutions, followed by a super-resolution module for improving the resolution of the color and depth signals. The obtained high resolution representations allow users to perform interactive exploration and basic editing operations on the reconstructed indoor scene, namely: i) rendering of the scene in various modalities (point cloud, polygonal, wireframe) ii) refurnishing (transferring portions of rooms) iii) deferred shading through the usage of precomputed normal maps. These kinds of scene editing and manipulations can be used for assessing the inference from deep learning models and enable several Mixed Reality applications in areas such as furniture retail, interior design, and real estate. Moreover, it can also be useful in data augmentation, arts, designs, and paintings. We report on the performance improvement of the various processing components on public domain spherical image indoor datasets.
}, thumbnail = {https://www.crs4.it/vic/data/papers/gm2023-spider.jpg}, url = {https://www.sciencedirect.com/science/article/pii/S1524070323000127}, } @article{Zoccheddu:2023:HIB, idxkey = {TOP-THEME-MESHES}, idxproject = {}, author = {Francesco Zoccheddu and Enrico Gobbetti and Marco Livesu and Nico Pietroni and Gianmarco Cherchi}, title = {{HexBox}: Interactive Box Modeling of Hexahedral Meshes}, journal = {Computer Graphics Forum}, volume = {42}, number = {5}, month = jul, pages = {1--15}, doi = {10.1111/cgf.14899}, year = {2023}, issn = {}, abstract = { We introduce HexBox, an intuitive modeling method and interactive tool for creating and editing hexahedral meshes. HexBox brings the major and widely validated surface modeling paradigm of surface box modeling into the world of hex meshing. The main idea is to allow the user to box-model a volumetric mesh by primarily modifying its surface through a set of topological and geometric operations. We support, in particular, local and global subdivision, various instantiations of extrusion, removal, and cloning of elements, the creation of non-conformal or conformal grids, as well as shape modifications through vertex positioning, including manual editing, automatic smoothing, or, eventually, projection on an externally-provided target surface. At the core of the efficient implementation of the method is the coherent maintenance, at all steps, of two parallel data structures: a hexahedral mesh representing the topology and geometry of the currently modeled shape, and a directed acyclic graph that connects operation nodes to the affected mesh hexahedra. Operations are realized by exploiting recent advancements in grid-based meshing, such as mixing of 3-refinement, 2-refinement, and face-refinement, and using templated topological bridges to enforce on-the-fly mesh conformity across pairs of adjacent elements. A direct manipulation user interface lets users control all operations. The effectiveness of our tool, released as open source to the community, is demonstrated by modeling several complex shapes hard to realize with competing tools and techniques.}, url = {https://www.crs4.it/vic/data/papers/sgp2023-hexbox.pdf}, } @article{Pintus:2023:ELS, idxkey = {TOP-THEME-ACQUISITION}, idxproject = {EVOCATION,SVDC,VIGECLAB}, author = {Ruggero Pintus and Moonisa Ahsan and Antonio Zorcolo and Fabio Bettio and Fabio Marton and Enrico Gobbetti}, title = {Exploiting Local Shape and Material Similarity for Effective {SV-BRDF} Reconstruction from Sparse Multi-Light Image Collections}, journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)}, volume = {16}, number = {2}, month = jun, pages = {39:1--39:31}, year = 2023, doi = {10.1145/3593428}, abstract = { We present a practical solution to create a relightable model from small Multi-light Image Collections (MLICs) acquired using standard acquisition pipelines. The approach targets the difficult but very common situation in which the optical behavior of a flat, but visually and geometrically rich object, such as a painting or a bas relief, is measured using a fixed camera taking a limited number of images with a different local illumination. By exploiting information from neighboring pixels through a carefully-crafted weighting and regularization scheme, we are able to efficiently infer subtle and visually pleasing per-pixel analytical Bidirectional Reflectance Distribution Function (BRDF) representations from few per-pixel samples.
The method has a low memory footprint and is easily parallelizable. We qualitatively and quantitatively evaluated it on both synthetic and real data in the scope of image-based relighting applications. }, url = {https://www.crs4.it/vic/data/papers/jocch2023-svbrdf.pdf}, } @PhdThesis{Ahsan:2023:SEC, idxproject = {EVOCATION}, author = {Moonisa Ahsan}, title = {Scalable Exploration of Complex Objects and Environments Beyond Plain Visual Replication}, school = {PhD Programme in Mathematics and Computer Science, University of Cagliari, Italy}, year = 2023, abstract= { Digital multimedia content and presentation means are rapidly increasing their sophistication and are now capable of describing detailed representations of the physical world. 3D exploration experiences allow people to appreciate, understand and interact with intrinsically virtual objects. Communicating information on objects requires the ability to explore them from different angles, as well as to mix highly photorealistic or illustrative presentations of the objects themselves with additional data that provides further insights on these objects, typically represented in the form of annotations. Effectively providing these capabilities requires the solution of important problems in visualization and user interaction. In this thesis, I studied these problems in the cultural heritage computing domain, focusing on the very common and important special case of mostly planar, but visually, geometrically, and semantically rich objects. These could be generally roughly flat objects with a standard frontal viewing direction (e.g., paintings, inscriptions, bas-reliefs), as well as visualizations of fully 3D objects from particular points of view (e.g., canonical views of buildings or statues). Selecting a precise application domain and a specific presentation mode allowed me to concentrate on the well-defined use case of the exploration of annotated relightable stratigraphic models (in particular, for local and remote museum presentation). My main results and contributions to the state of the art have been a novel technique for interactively controlling visualization lenses while automatically maintaining good focus-and-context parameters, a novel approach for avoiding clutter in an annotated model and for guiding users towards interesting areas, and a method for structuring audio-visual object annotations into a graph and for using that graph to improve guidance and support storytelling and automated tours. We demonstrated the effectiveness and potential of our techniques by performing interactive exploration sessions on various screen sizes and types ranging from desktop devices to large-screen displays for a walk-up-and-use museum installation. }, url = {https://www.crs4.it/vic/data/papers/2023-phd-ahsan-exploration.pdf}, } %################################ %### 2022 %################################ @inproceedings{Tukur:2022:SSI, idxkey = {}, idxproject = {VIGECLAB,EVOCATION}, author = {Muhammad Tukur and Giovanni Pintore and Enrico Gobbetti and Jens Schneider and Marco Agus}, title = {{SPIDER}: Spherical Indoor Depth Renderer}, booktitle = {Proc. Smart Tools and Applications in Graphics (STAG)}, month = oct, pages = {131--138}, year = 2022, doi={10.2312/stag.20221267}, abstract = { Today's Extended Reality (XR) applications that call for specific Diminished Reality (DR) strategies to hide specific classes of objects are increasingly using 360$^\circ$ cameras, which can capture entire areas in a single picture.
In this work, we present an interactive image editing and rendering system named SPIDER that takes a spherical 360$^\circ$ indoor scene as input. The system incorporates the output of deep learning models to abstract the segmentation and depth images of full and empty rooms to allow users to perform interactive exploration and basic editing operations on the reconstructed indoor scene, namely: i) rendering of the scene in various modalities (point cloud, polygonal, wireframe) ii) refurnishing (transferring portions of rooms) iii) deferred shading through the usage of precomputed normal maps. These kinds of scene editing and manipulations can be used for assessing the inference from deep learning models and enable several Mixed Reality (MR) applications in areas such as furniture retail, interior design, and real estate. Moreover, it can also be useful in data augmentation, arts, designs, and paintings. }, url = {https://www.crs4.it/vic/data/papers/stag2022-spider.pdf}, } @Article{Pintore:2022:IAE, idxkey = {TOP-THEME-INDOOR}, idxproject = {EVOCATION,SVDC}, author = {Giovanni Pintore and Marco Agus and Eva Almansa and Enrico Gobbetti}, title = {Instant Automatic Emptying of Panoramic Indoor Scenes}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {28}, number = {11}, month = nov, pages = {3629--3639}, year = 2022, doi={10.1109/TVCG.2022.3202999}, abstract = { Nowadays 360$^{\circ}$ cameras, capable of capturing full environments in a single shot, are increasingly being used in a variety of Extended Reality (XR) applications that require specific Diminished Reality (DR) techniques to conceal selected classes of objects. In this work, we present a new data-driven approach that, from an input 360$^{\circ}$ image of a furnished indoor space, automatically returns, with very low latency, an omnidirectional photorealistic view and architecturally plausible depth of the same scene emptied of all clutter. Contrary to recent data-driven inpainting methods that remove single user-defined objects based on their semantics, our approach is holistically applied to the entire scene, and is capable of separating the clutter from the architectural structure in a single step. By exploiting peculiar geometric features of the indoor environment, we shift the major computational load to the training phase, keeping an extremely lightweight network at prediction time. Our end-to-end approach starts by calculating an attention mask of the clutter in the image based on the geometric difference between the full and the empty scene. This mask is then propagated through gated convolutions that drive the generation of the output image and its depth. Returning the depth of the resulting structure allows us to exploit, during supervised training, geometric losses of different orders, including robust pixel-wise geometric losses and high-order 3D constraints typical of indoor structures. The experimental results demonstrate that our method provides interactive performance and outperforms current state-of-the-art solutions in prediction accuracy on available commonly used indoor panoramic benchmarks. In addition, our method presents consistent quality results even for scenes captured in the wild and for data for which there is no ground truth to support supervised training. }, url = {https://www.crs4.it/vic/data/papers/ismar2022-emptying.pdf}, note = {Proc.
ISMAR} } @inproceedings{Al-Thelaya:2022:FVA, idxproject = {SVDC,PAM}, author={Khaled Al-Thelaya and {Faaiz Hussain Kahn} Joad and {Nauman Ullah} {Gilal} and William Mifsud and Giovanni Pintore and Enrico Gobbetti and Marco Agus and Jens Schneider}, title={HistoContours: a framework for visual annotation of histopathology whole slide images}, booktitle={Proc. Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM)}, month=sep, year=2022, doi = {10.2312/vcbm.2022119}, pages = {99--109}, abstract={ We present an end-to-end framework for histopathological analysis of whole slide images (WSIs). Our framework uses deep learning-based localization \& classification of cell nuclei followed by spatial data aggregation to propagate classes of sparsely distributed nuclei across the entire slide. We use YOLO (“You Only Look Once”) for localization instead of more costly segmentation approaches and show that using HistAuGAN boosts its performance. YOLO finds bounding boxes around nuclei at good accuracy, but the classification accuracy can be improved by other methods. To this end, we extract patches around nuclei from the WSI and consider models from the SqueezeNet, ResNet, and EfficientNet families for classification. Where we do not achieve a clear separation between highest and second-highest softmax activation of the classifier, we use YOLO’s output as a secondary vote. The result is a sparse annotation of the WSI which we turn dense by using kernel density estimation. The result is a full vector of probabilities, per pixel, for each class of nucleus we consider. This allows us to visualize our results using both color-coding and iso-contouring, reducing visual clutter. Our novel nuclei-to-tissue coupling allows histopathologists to work at both the nucleus and the tissue level, a feature appreciated by domain experts in a qualitative user study. }, url = {https://www.crs4.it/vic/data/papers/vcbm2022-histo.pdf}, note = {Best full paper award} } @inproceedings{Ahsan:2022:EFU, idxkey = {}, idxproject = {SVDC,EVOCATION}, author = {Moonisa Ahsan and Giuliana Altea and Fabio Bettio and Marco Callieri and Antonella Camarda and Paolo Cignoni and Enrico Gobbetti and Paolo Ledda and Alessandro Lutzu and Fabio Marton and Giuseppe Mignemi and Federico Ponchio}, title = {Ebb \& Flow: Uncovering {Costantino Nivola}'s {Olivetti} Sandcast through {3D} Fabrication and Virtual Exploration}, booktitle = {The 20th Eurographics Workshop on Graphics and Cultural Heritage}, month = sep, pages = {85--94}, year = 2022, doi={10.2312/gch.20221230}, abstract = { We report on the outcomes of a large multi-disciplinary project targeting the physical reproduction and virtual documentation and exploration of the Olivetti sandcast, a monumental (over 100m$^2$) semi-abstract frieze by the Italian sculptor Costantino Nivola. After summarizing the goal and motivation of the project, we provide details on the acquisition and processing steps that led to the creation of a 3D digital model. We then discuss the technical details and the challenges that we have faced for the physical fabrication process of a massive physical replica, which was the centerpiece of a recent exhibition. We finally discuss the design and application of an interactive web-based tool for the exploration of an annotated virtual replica. The main components of the tool will be released as open source. 
}, url = {https://www.crs4.it/vic/data/papers/gch2022-nivola.pdf}, } @inproceedings{Agus:2022:VVA, idxkey = {TOP-THEME-VOLUMETRIC}, idxproject = {SVDC}, author = {Marco Agus and Amal Aboulhassan and Khaled {Al-Thelaya} and Giovanni Pintore and Enrico Gobbetti and Corrado Cal\`i and Jens Schneider}, title = {{Volume Puzzle}: visual analysis of segmented volume data with multivariate attributes}, booktitle = {Proc. IEEE Visualization and Visual Analytics (VIS)}, month = nov, pages = {130--134}, year = 2022, doi={10.1109/VIS54862.2022.00035}, abstract = {A variety of application domains, including materials science, neuroscience, and connectomics, commonly use segmented volume data for explorative visual analysis. In many cases, segmented objects are characterized by multivariate attributes expressing specific geometric or physical features. Objects with similar characteristics, determined by selected attribute configurations, can create peculiar spatial patterns, whose detection and study is of fundamental importance. This task is notoriously difficult, especially when the number of attributes per segment is large. In this work, we propose an interactive framework that combines a state-of-the-art direct volume renderer for categorical volumes with techniques for the analysis of the attribute space and for the automatic creation of 2D transfer functions. We show, in particular, how dimensionality reduction, kernel-density estimation, and topological techniques such as Morse analysis combined with scatter and density plots allow the efficient design of two-dimensional color maps that highlight spatial patterns. The capabilities of our framework are demonstrated on synthetic and real-world data from several domains.}, url = {https://www.crs4.it/vic/data/papers/visshort2022-volumepuzzle.pdf}, } @Article{Ahsan:2022:AAG, idxkey = {TOP-THEME-UI}, idxproject = {EVOCATION,SVDC}, author = {Moonisa Ahsan and Fabio Marton and Ruggero Pintus and Enrico Gobbetti}, title = {Audio-visual Annotation Graphs for Guiding Lens-based Scene Exploration}, journal = {Computers \& Graphics}, year = 2022, volume = {105}, number = {}, pages = {131--145}, issn = {}, doi = {10.1016/j.cag.2022.05.003}, abstract = { We introduce a novel approach for guiding users in the exploration of annotated 2D models using interactive visualization lenses. Information on the interesting areas of the model is encoded in an annotation graph generated at authoring time. Each graph node contains an annotation, in the form of a visual and audio markup of the area of interest, as well as the optimal lens parameters that should be used to explore the annotated area and a scalar representing the annotation importance. Directed graph edges are used, instead, to represent preferred ordering relations in the presentation of annotations, by having each node point to the set of nodes that should be seen before presenting its associated annotation. A scalar associated with each edge determines the strength of this constraint. At run-time, users explore the scene with the lens, and the graph is exploited to select the annotations that have to be presented at a given time. The selection is based on the current view and lens parameters, the graph content and structure, and the navigation history. The best annotation under the lens is presented by playing the associated audio clip and showing the visual markup in overlay.
When the user releases control, requests guidance, opts for automatic touring, or when no available annotations are under the lens, the system guides the user towards the next best annotation using glyphs, and potentially moves the lens towards it if the user remains inactive. This approach supports the seamless blending of an automatic tour of the data with interactive lens-based exploration. The approach is tested and discussed in the context of the exploration of multi-layer relightable models. }, url = {https://www.crs4.it/vic/data/papers/cag2022-avagraphs.pdf}, } @Article{Mohanto:2022:IVF, idxkey = {}, idxproject = {EVOCATION}, author = {Bipul Mohanto and {ABM Tariqul} Islam and Enrico Gobbetti and Oliver Staadt}, title = {An integrative view of foveated rendering}, journal = {Computers \& Graphics}, year = 2022, volume = {102}, number = {}, pages = {474--501}, issn = {}, doi = {10.1016/j.cag.2021.10.010}, abstract = { Foveated rendering adapts the image synthesis process to the user's gaze. By exploiting the human visual system's limitations, in particular in terms of reduced acuity in peripheral vision, it strives to deliver high-quality visual experiences at very reduced computational, storage, and transmission costs. Despite the very substantial progress made in the past decades, the solution landscape is still fragmented, and several research problems remain open. In this work, we present an up-to-date integrative view of the domain from the point of view of the rendering methods employed, discussing general characteristics, commonalities, differences, advantages, and limitations. We cover, in particular, techniques based on adaptive resolution, geometric simplification, shading simplification, chromatic degradation, as well as spatio-temporal deterioration. Next, we review the main areas where foveated rendering is already in use today. We finally point out relevant research issues and analyze research trends. }, thumbnail = {https://www.crs4.it/vic/data/papers/cag2021-foveated.jpg}, url = {https://www.sciencedirect.com/science/article/pii/S0097849321002211/pdfft?download=true} } %################################ %### 2021 %################################ @Article{Pintore:2021:D3R, idxkey = {TOP-THEME-INDOOR}, idxproject = {EVOCATION,VIGECLAB}, author = {Giovanni Pintore and Eva Almansa and Marco Agus and Enrico Gobbetti}, title = {{Deep3DLayout}: {3D} Reconstruction of an Indoor Layout from a Spherical Panoramic Image}, journal = {ACM Transactions on Graphics}, volume = {40}, number = {6}, month = dec, pages = {250:1--250:12}, year = 2021, doi={10.1145/3478513.3480480}, abstract = { Recovering the 3D shape of the bounding permanent surfaces of a room from a single image is a key component of indoor reconstruction pipelines. In this article, we introduce a novel deep learning technique capable of producing, at interactive rates, a tessellated bounding 3D surface from a single $360^\circ$ image. Differently from prior solutions, we fully address the problem in 3D, significantly expanding the reconstruction space. A graph convolutional network directly infers the room structure as a 3D mesh by progressively deforming a graph-encoded tessellated sphere mapped to the spherical panorama, leveraging perceptual features extracted from the input image. Important 3D properties of indoor environments are exploited in our design.
In particular, gravity-aligned features are actively incorporated in the graph in a projection layer that exploits the recent concept of multi-head self-attention, and specialized losses guide towards plausible solutions even in the presence of massive clutter and occlusions. Extensive experiments demonstrate that our approach outperforms current state-of-the-art methods in terms of accuracy and capability to reconstruct more complex environments. }, url = {https://www.crs4.it/vic/data/papers/sigasia2021-deep3dlayout.pdf}, note = {Proc. SIGGRAPH Asia 2021} } @article{Livesu:2021:ODS, idxkey = {}, idxproject = {}, author = {Marco Livesu and Luca Pitzalis and Gianmarco Cherchi}, title = {Optimal Dual Schemes for Adaptive Grid Based Hexmeshing}, journal = {ACM Transactions on Graphics}, volume = {41}, number = {2}, month = dec, pages = {15:1--15:14}, doi = {10.1145/3494456}, year = {2021}, issn = {0730-0301}, abstract = {Hexahedral meshes are a ubiquitous domain for the numerical resolution of partial differential equations. Computing a pure hexahedral mesh from an adaptively refined grid is a prominent approach to automatic hexmeshing, and requires the ability to restore the all-hex property around the hanging nodes that arise at the interface between cells of different sizes. The most advanced tools to accomplish this task are based on mesh dualization. These approaches use topological schemes to regularize the valence of inner vertices and edges, such that dualizing the grid yields a pure hexahedral mesh. In this article, we study the dual approach in detail, and propose four main contributions to it: (i) we enumerate all the possible transitions that dual methods must be able to handle, showing that prior schemes do not natively cover all of them; (ii) we show that schemes are internally asymmetric, so not only is their construction ambiguous, but different implementation choices lead to hexahedral meshes with different singular structures; (iii) we explore the combinatorial space of dual schemes, selecting the minimum set that covers all the possible configurations and also yields the simplest singular structure in the output hexmesh; (iv) we enlarge the class of adaptive grids that can be transformed into pure hexahedral meshes, relaxing one of the tight topological requirements imposed by previous approaches. Our extensive experiments show that our transition schemes consistently outperform prior art in terms of ability to converge to a valid solution, amount and distribution of singular mesh edges, and element count. Last but not least, we publicly release our code and reveal a conspicuous amount of technical detail that was overlooked in the previous literature, lowering an entry barrier that was hard to overcome for practitioners in the field.}, url = {https://www.crs4.it/vic/data/papers/tog2021-dualschemes.pdf}, } @Article{Pitzalis:2021:GAR, idxkey = {TOP-THEME-MESHES}, idxproject = {VIGECLAB}, author = {Luca Pitzalis and Marco Livesu and Gianmarco Cherchi and Enrico Gobbetti and Riccardo Scateni}, title = {Generalized Adaptive Refinement for Grid-based Hexahedral Meshing}, journal = {ACM Transactions on Graphics}, volume = {40}, number = {6}, month = dec, pages = {257:1--257:13}, year = 2021, doi={10.1145/3478513.3480508}, abstract = { Due to their nice numerical properties, conforming hexahedral meshes are considered a prominent computational domain for simulation tasks.
However, the automatic decomposition of a general 3D volume into a small number of hexahedral elements is very challenging. Methods that create an adaptive Cartesian grid and convert it into a conforming mesh offer superior robustness and are the only ones concretely used in the industry. Topological schemes that permit this conversion can be applied only if precise compatibility conditions among grid elements are observed. Some of these conditions are local, hence easy to formulate; others are not and are much harder to satisfy. State-of-the-art approaches fulfill these conditions by prescribing additional refinement based on special building rules for octrees. These methods operate in a restricted space of solutions and are prone to severely over-refine the input grids, creating a bottleneck in the simulation pipeline. In this article, we introduce a novel approach to transform a general adaptive grid into a new grid meeting hexmeshing criteria, without resorting to tree rules. Our key insight is that we can formulate all compatibility conditions as linear constraints in an integer programming problem by choosing the proper set of unknowns. Since we operate in a broader solution space, we are able to meet topological hexmeshing criteria at a much coarser scale than methods using octrees, also supporting generalized grids of any shape or topology. We demonstrate the superiority of our approach for both traditional grid-based hexmeshing and adaptive polycube-based hexmeshing. In all our experiments, our method never prescribed more refinement than the prior art and, in the average case, it introduced close to half the number of extra cells. }, url = {https://www.crs4.it/vic/data/papers/sigasia2021-hexmeshing.pdf}, note = {Proc. SIGGRAPH Asia 2021} } @inproceedings{Pintus:2021:ENP, idxkey = {}, idxproject = {VIGECLAB,EVOCATION}, author = {Ruggero Pintus and Moonisa Ahsan and Fabio Marton and Enrico Gobbetti}, title = {Exploiting Neighboring Pixels Similarity for Effective {SV-BRDF} Reconstruction from Sparse {MLICs}}, booktitle = {The 19th Eurographics Workshop on Graphics and Cultural Heritage}, month = nov, pages = {}, year = 2021, doi={10.2312/gch.20211412}, abstract = { We present a practical solution to create a relightable model from Multi-Light Image Collections (MLICs) acquired using standard acquisition pipelines. The approach targets the difficult but very common situation in which the optical behavior of a flat, but visually and geometrically rich object, such as a painting or a bas-relief, is measured using a fixed camera taking a few images under different local illumination. By exploiting information from neighboring pixels through a carefully crafted weighting and regularization scheme, we are able to efficiently infer subtle per-pixel analytical Bidirectional Reflectance Distribution Function (BRDF) representations from a few per-pixel samples. The method is qualitatively and quantitatively evaluated on both synthetic and real data in the scope of image-based relighting applications. }, url = {https://www.crs4.it/vic/data/papers/gch2021-svbrdf.pdf}, note = {Best paper award at GCH 2021} } @inproceedings{Ahsan:2021:GLE, idxkey = {}, idxproject = {VIGECLAB,EVOCATION}, author = {Moonisa Ahsan and Fabio Marton and Ruggero Pintus and Enrico Gobbetti}, title = {Guiding Lens-based Exploration using Annotation Graphs}, booktitle = {Proc.
Smart Tools and Applications in Graphics (STAG)}, month = oct, pages = {85--90}, year = 2021, doi={10.2312/stag.20211477}, abstract = { We introduce a novel approach for guiding users in the exploration of annotated 2D models using interactive visualization lenses. Information on the interesting areas of the model is encoded in an annotation graph generated at authoring time. Each graph node contains an annotation, in the form of a visual markup of the area of interest, as well as the optimal lens parameters that should be used to explore the annotated area and a scalar representing the annotation importance. Graph edges are used, instead, to represent preferred ordering relations in the presentation of annotations. A scalar associated with each edge determines the strength of this prescription. At run-time, the graph is exploited to assist users in their navigation by determining the next best annotation in the database and moving the lens towards it when the user releases interactive control. The selection is based on the current view and lens parameters, the graph content and structure, and the navigation history. This approach supports the seamless blending of an automatic tour of the data with interactive lens-based exploration. The approach is tested and discussed in the context of the exploration of multi-layer relightable models. }, url = {https://www.crs4.it/vic/data/papers/stag2021-annotation-graphs.pdf}, note = {Honorable mention in best paper award category at STAG 2021} } @Article{Pintus:2021:PEM, idxkey = {TOP-THEME-ACQUISITION}, idxproject = {VIGECLAB}, author = {Ruggero Pintus and Alberto {Jaspe Villanueva} and Antonio Zorcolo and Markus Hadwiger and Enrico Gobbetti}, title = {A Practical and Efficient Model for Intensity Calibration of Multi-Light Image Collections}, journal = {The Visual Computer}, volume = {37}, number = {9}, month = sep, pages = {2755--2767}, year = 2021, doi={10.1007/s00371-021-02172-9}, abstract = { We present a novel, practical, and efficient mathematical formulation for light intensity calibration of Multi-Light Image Collections (MLICs). Inspired by existing and orthogonal calibration methods, we design a hybrid solution that leverages their strengths while overcoming most of their weaknesses. We combine the rationale of approaches based on fixed analytical models with the interpolation scheme of image domain methods. This allows us to minimize the final residual error in light intensity estimation, without imposing an overly constraining illuminant type. Unlike previous approaches, the proposed calibration strategy proved to be simpler, more efficient and versatile, and extremely adaptable to different setup scenarios. We conduct an extensive analysis and validation of our new light model compared to several state-of-the-art techniques, and we show how the proposed solution provides more reliable outcomes in terms of accuracy and precision, a more stable calibration across different light positions/orientations, and a more general light form factor.
}, url = {https://www.crs4.it/vic/data/papers/tvc2021-mlic-calibration.pdf}, note = {Best paper award at CGI 2021} } @Article{AlThelaya:2021:IIS, idxkey = {}, idxproject = {VIGECLAB,DIFRA}, author = {Khaled {Al-Thelaya} and Marco Agus and {Nauman Ullah} Gilal and Yin Yang and Giovanni Pintore and Enrico Gobbetti and Corrado Cal\`i and {Pierre J.} Magistretti and William Mifsud and Jens Schneider}, title = {{InShaDe}: Invariant Shape Descriptors for visual {2D} and {3D} cellular and nuclear shape analysis and classification}, journal = {Computers \& Graphics}, year = 2021, volume = {98}, number = {}, pages = {105--125}, issn = {0097-8493}, doi = {10.1016/j.cag.2021.04.037}, abstract = { We present a shape processing framework for visual exploration of cellular nuclear envelopes extracted from microscopic images arising in histology and neuroscience. The framework is based on a novel shape descriptor of closed contours in 2D and 3D. In 2D, it relies on a geodesically uniform resampling of discrete curves to compute unsigned curvatures at vertices and edges based on discrete differential geometry. Our descriptor is, by design, invariant under translation, rotation, and parameterization. We achieve the latter invariance under parameterization shifts by using elliptic Fourier analysis on the resulting curvature vectors. Uniform scale-invariance is optional and is a result of scaling curvature features to z-scores. We further augment the proposed descriptor with feature coefficients obtained through sparse coding of the extracted cellular structures using K-sparse autoencoders. For the analysis of 3D shapes, we compute mean curvatures based on the Laplace-Beltrami operator on triangular meshes, followed by computing a spherical parameterization through mean curvature flow. Finally, we compute the Spherical Harmonics decomposition to obtain invariant energy coefficients. Our invariant descriptors provide an embedding into a fixed-dimensional feature space that can be used for various applications, e.g., as input features for deep and shallow learning techniques or as input for dimension reduction schemes to provide a visual reference for clustering shape collections. We demonstrate the capabilities of our framework in the context of visual analysis and unsupervised classification of 2D histology images and 3D nuclear envelopes extracted from serial section electron microscopy stacks. }, url = {https://www.crs4.it/vic/data/papers/cag2021-inshade.pdf}, } @Article{Bettio:2021:NAE, idxkey = {TOP-THEME-UI, TOP-THEME-MOBILE}, idxproject = {EVOCATION,TDM,VIGECLAB}, author = {Fabio Bettio and Moonisa Ahsan and Fabio Marton and Enrico Gobbetti}, title = {A novel approach for exploring annotated data with interactive lenses}, journal = j-CG-FORUM, year = 2021, volume = {40}, number = {3}, pages = {387--398}, doi = {10.1111/cgf.14315}, abstract = { We introduce a novel approach for assisting users in exploring 2D data representations with an interactive lens. Focus-and-context exploration is supported by translating user actions into the joint adjustments in camera and lens parameters that ensure a good placement and sizing of the lens within the view. This general approach, implemented using standard device mappings, overcomes the limitations of current solutions, which force users to continuously switch from lens positioning and scaling to view panning and zooming. Navigation is further assisted by exploiting data annotations.
In addition to traditional visual markups and information links, we associate with each annotation a lens configuration that highlights the region of interest. During interaction, an assisting controller determines the next best lens in the database based on the current view and lens parameters and the navigation history. Then, the controller interactively guides the user's lens towards the selected target and displays its annotation markup. As only one annotation markup is displayed at a time, clutter is reduced. Moreover, in addition to guidance, the navigation can also be automated to create a tour through the data. While our methods apply to general 2D visualization, we have implemented them for the exploration of stratigraphic relightable models. The capabilities of our approach are demonstrated in cultural heritage use cases. A user study has been performed to validate our approach. }, url = {https://www.crs4.it/vic/data/papers/ev2021-lenses.pdf}, note = {Proc. EUROVIS 2021} } @InProceedings{Pintore:2021:SDD, idxkey = {TOP-THEME-INDOOR}, idxproject = {EVOCATION, VIGECLAB}, author = {Giovanni Pintore and Marco Agus and Eva Almansa and Jens Schneider and Enrico Gobbetti}, title = {{SliceNet}: deep dense depth estimation from a single indoor panorama using a slice-based representation}, booktitle = {Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, pages = {11531--11540}, year = {2021}, doi = {10.1109/CVPR46437.2021.01137}, abstract = { We introduce a novel deep neural network to estimate a depth map from a single monocular indoor panorama. The network directly works on the equirectangular projection, exploiting the properties of indoor 360-degree images. Starting from the fact that gravity plays an important role in the design and construction of man-made indoor scenes, we propose a compact representation of the scene into vertical slices of the sphere, and we exploit long- and short-term relationships among slices to recover the equirectangular depth map. Our design makes it possible to maintain high-resolution information in the extracted features even with a deep network. The experimental results demonstrate that our method outperforms current state-of-the-art solutions in prediction accuracy, particularly for real-world data. }, url = {https://www.crs4.it/vic/data/papers/cvpr2021-slicenet.pdf}, note = {Selected as oral presentation}, } @Article{Nuvoli:2021:ASS, idxkey = {TOP-THEME-MESHES}, idxproject = {VIGECLAB}, author = {Stefano Nuvoli and Alessandro Tola and Alessandro Muntoni and Nico Pietroni and Enrico Gobbetti and Riccardo Scateni}, title = {Automatic Surface Segmentation for Seamless Fabrication using 4-axis Milling Machines}, journal = j-CG-FORUM, year = 2021, volume = {40}, number = {2}, pages = {191--203}, doi = {10.1111/cgf.142625}, abstract = { We introduce a novel geometry-processing pipeline to guide the fabrication of complex shapes from a single block of material using 4-axis CNC milling machines. This setup extends classical 3-axis CNC machining with an extra degree of freedom to rotate the object around a fixed axis. The first step of our pipeline identifies the rotation axis that maximizes the overall fabrication accuracy. Then we identify two height-field regions at the rotation axis's extremes used to secure the block on the rotation tool. We segment the remaining portion of the mesh into a set of height-fields whose principal directions are orthogonal to the rotation axis.
The segmentation balances the approximation quality, the boundary smoothness, and the total number of patches. Additionally, the segmentation process takes into account the object's geometric features, as well as saliency information. The output is a set of meshes ready to be processed by off-the-shelf software for the 3-axis tool-path generation. We present several results to demonstrate the quality and efficiency of our approach on a range of inputs. }, url = {https://www.crs4.it/vic/data/papers/eg2021-4axis.pdf}, note = {Proc. Eurographics 2021}, } @Article{Jaspe:2021:WEA, idxkey = {TOP-THEME-MOBILE}, idxproject = {EVOCATION,TDM,VIGECLAB}, author = {Alberto {Jaspe Villanueva} and Moonisa Ahsan and Ruggero Pintus and Andrea Giachetti and Enrico Gobbetti}, title = {Web-based Exploration of Annotated Multi-Layered Relightable Image Models}, journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)}, volume = {14}, number = {2}, month = may, pages = {24:1--24:31}, year = 2021, doi = {10.1145/3430846}, abstract = { We introduce a novel approach for exploring image-based shape and material models registered with structured descriptive information fused in multi-scale overlays. We represent the objects of interest as a series of registered layers of image-based shape and material data. These layers are represented at different scales and can come out of a variety of pipelines; they can include both RTI representations and spatially-varying normal and BRDF fields, possibly as a result of fusing multi-spectral data. An overlay image pyramid associates visual annotations with the various scales. The overlay pyramid of each layer is created at data preparation time by one of three methods: (1) by importing it from other pipelines; (2) by creating it with the simple annotation drawing toolkit available within the viewer; (3) with external image editing tools. This makes it easier for the user to seamlessly draw annotations over the region of interest. At run-time, clients can access an annotated multi-layered dataset through a standard web server. Users can explore these datasets on a variety of devices, ranging from small mobile devices to large-scale displays used in museum installations. On all these platforms, JavaScript/WebGL2 clients running in browsers are fully capable of performing layer selection, interactive relighting, enhanced visualization, and annotation display. We address the problem of clutter by embedding interactive lenses. This focus-and-context-aware (multiple-layer) exploration tool supports the exploration of more than one representation in a single view, allowing the mixing and matching of presentation modes and annotation display. The capabilities of our approach are demonstrated on a variety of cultural heritage use cases involving different kinds of annotated surface and material models.
}, url = {https://www.crs4.it/vic/data/papers/jocch2021-marlie.pdf}, } %################################ %### 2020 %################################ @inproceedings{Dulecha:2020:SBE, idxkey = {}, idxproject = {VIGECLAB}, author={Tinsae Dulecha and Ruggero Pintus and Enrico Gobbetti and Andrea Giachetti}, title={{SynthPS}: a benchmark for evaluation of {Photometric Stereo} algorithms for {Cultural Heritage} applications}, booktitle={The 17th Eurographics Workshop on Graphics and Cultural Heritage}, month=nov, year=2020, doi = {10.2312/gch.20201288}, pages = {13--22}, abstract={ Photometric Stereo (PS) is a technique for estimating surface normals from a collection of images captured from a fixed viewpoint and with variable lighting. Over the years, several methods have been proposed for the task, trying to cope with different materials, lights, and camera calibration issues. An accurate evaluation and selection of the best PS methods for different materials and acquisition setups is a fundamental step for the accurate quantitative reconstruction of objects' shapes. In particular, it would boost quantitative reconstruction in the Cultural Heritage domain, where a large number of Multi-Light Image Collections are captured with light domes or handheld Reflectance Transformation Imaging protocols. However, the lack of benchmarks specifically designed for this goal makes it difficult to compare the available methods and choose the most suitable technique for practical applications. An ideal benchmark should enable the evaluation of the quality of the reconstructed normals on the kind of surfaces typically captured in real-world applications, possibly evaluating performance variability as a function of material properties, light distribution, and image quality. The evaluation should not depend on light and camera calibration issues. In this paper, we propose a benchmark of this kind, SynthPS, which includes synthetic, physically-based renderings of Cultural Heritage object models with different assigned materials. SynthPS allowed us to evaluate the performance of classical, robust and learning-based Photometric Stereo approaches on different materials with different light distributions, also analyzing their robustness against errors typically arising in practical acquisition settings, including gamma correction and light calibration errors. }, url = {https://www.crs4.it/vic/data/papers/gch2020-synthps.pdf}, } @proceedings{Biasotti:2020:STA, idxkey = {}, idxproject = {VIGECLAB}, editor = {Silvia Biasotti and Ruggero Pintus and Stefano Berretti}, title = {Smart Tools and Apps for Graphics}, publisher = {Eurographics Association}, year = {2020}, isbn = {978-3-03868-124-3}, thumbnail = {https://www.crs4.it/vic/data/papers/stag2020.jpg}, } @inproceedings{Pitzalis:2020:WVM, idxkey = {}, idxproject = {}, author = {Luca Pitzalis and Gianmarco Cherchi and Riccardo Scateni and Lucio Spano}, title = {Working with Volumetric Meshes in a Game Engine: a Unity Prototype}, booktitle = {Proc.
Smart Tools and Apps for Graphics}, year = {2020}, pages = {57--62}, doi = {10.2312/stag.20201240}, url = {https://www.crs4.it/vic/data/papers/stag2020-volmesh-unity.pdf} } @article{Barracca:2020:ECM, title={Evolution of Clinical Medicine: From Expert Opinion to Artificial Intelligence}, author={Antonio Barracca and Mauro Contini and Stefano Ledda and Gianmaria Mancosu and Giovanni Pintore and Kianoush Kashani and Claudio Ronco}, journal={Journal of Translational Critical Care Medicine}, volume={2}, number={4}, pages={78--82}, year={2020}, doi={10.4103/jtccm.jtccm_6_21}, url = {https://www.tccmjournal.com/article.asp?issn=2665-9190;year=2020;volume=2;issue=4;spage=78;epage=82;aulast=Barracca}, thumbnail={https://www.crs4.it/vic/vic/img/thumb-none.jpg}, } @inproceedings{Agus:2020:IIS, idxproject = {VIGECLAB}, author={Marco Agus and Khaled Al-Thelaya and Corrado Cal\`i and Marina Boido and Yin Yang and Giovanni Pintore and Enrico Gobbetti and Jens Schneider}, title={{InShaDe}: Invariant Shape Descriptors for visual analysis of histology 2D cellular and nuclear shapes}, booktitle={Proc. Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM)}, month=sep, year=2020, doi = {10.2312/vcbm.20201173}, pages = {61--70}, abstract={ We present a shape processing framework for visual exploration of cellular nuclear envelopes extracted from histology images. The framework is based on a novel shape descriptor of closed contours relying on a geodesically uniform resampling of discrete curves to allow for discrete differential-geometry-based computation of unsigned curvature at vertices and edges. Our descriptor is, by design, invariant under translation, rotation, and parameterization. It additionally offers optional uniform scale-invariance. The optional scale-invariance is achieved by scaling features to z-scores, while invariance under parameterization shifts is achieved by using elliptic Fourier analysis (EFA) on the resulting curvature vectors. These invariant shape descriptors provide an embedding into a fixed-dimensional feature space that can be utilized for various applications: (i) as input features for deep and shallow learning techniques; (ii) as input for dimension reduction schemes providing a visual reference for clustering collections of shapes. The capabilities of the proposed framework are demonstrated in the context of visual analysis and unsupervised classification of histology images. }, url = {https://www.crs4.it/vic/data/papers/vcbm2020-inshade.pdf}, } @InProceedings{Pintore:2020:AI3, idxkey = {TOP-THEME-INDOOR}, idxproject = {VIGECLAB,AMAC,TDM,EVOCATION}, author = {Giovanni Pintore and Marco Agus and Enrico Gobbetti}, title = {{AtlantaNet}: Inferring the {3D} Indoor Layout from a Single 360 Image beyond the {Manhattan} World Assumption}, booktitle = {Proc. ECCV}, year = 2020, month = aug, isbn = {}, doi = {10.1007/978-3-030-58598-3_26}, location = {}, pages = {432--448}, articleno = {}, abstract = { We introduce a novel end-to-end approach to predict a 3D room layout from a single panoramic image. Compared to recent state-of-the-art works, our method is not limited to Manhattan World environments, and can reconstruct rooms bounded by vertical walls that do not form right angles or are curved -- i.e., Atlanta World models. In our approach, we project the original gravity-aligned panoramic image on two horizontal planes, one above and one below the camera.
This representation encodes all the information needed to recover the \emph{Atlanta World} 3D bounding surfaces of the room in the form of a 2D room footprint on the floor plan and a room height. To predict the 3D layout, we propose an encoder-decoder neural network architecture, leveraging Recurrent Neural Networks (RNNs) to capture long-range geometric patterns, and exploiting a customized training strategy based on domain-specific knowledge. The experimental results demonstrate that our method outperforms state-of-the-art solutions in prediction accuracy, in particular in cases of complex wall layouts or curved wall footprints. }, url = {https://www.crs4.it/vic/data/papers/eccv2020-atlantanet.pdf}, } @InProceedings{Pintore:2020:A3R, idxkey = {}, idxproject = {VIGECLAB,AMAC,TDM,EVOCATION}, author = {Giovanni Pintore and Claudio Mura and Fabio Ganovelli and Lizeth Fuentes-Perez and Renato Pajarola and Enrico Gobbetti}, title = {Automatic {3D} Reconstruction of Structured Indoor Environments}, booktitle = {SIGGRAPH 2020 Courses}, year = 2020, month = aug, doi = {10.1145/3388769.3407469}, pages = {10:1--10:218}, abstract = { Creating high-level structured 3D models of real-world indoor scenes from captured data is a fundamental task which has important applications in many fields. Given the complexity and variability of interior environments and the need to cope with noisy and partial captured data, many open research problems remain, despite the substantial progress made in the past decade. In this tutorial, we provide an up-to-date integrative view of the field, bridging complementary views coming from computer graphics and computer vision. After providing a characterization of input sources, we define the structure of output models and the priors exploited to bridge the gap between imperfect sources and desired output. We then identify and discuss the main components of a structured reconstruction pipeline, and review how they are combined in scalable solutions working at the building level. We finally point out relevant research issues and analyze research trends. }, url = {https://www.crs4.it/vic/data/papers/sig2020-tutorial-indoor-course-notes.pdf}, } @inproceedings{Agus:2020:WE3, idxproject = {VIGEC}, author={Marco Agus and Enrico Gobbetti and Giovanni Pintore and Corrado Cal\`i and Jens Schneider}, title={{WISH}: efficient {3D} biological shape classification through {Willmore} flow and {Spherical Harmonics} decomposition}, booktitle={Proc. IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)}, month=jun, year=2020, doi = {10.1109/CVPRW50498.2020.00494}, pages = {4184--4194}, abstract={ Shape analysis of cell nuclei, enabled by the recent advances in nano-scale digital imaging and reconstruction methods, is emerging as a very important tool to understand low-level biological processes. Current analysis techniques, however, are performed on 2D slices or assume very simple 3D shape approximations, limiting their discrimination capabilities. In this work, we introduce a compact rotation-invariant frequency-based representation of genus-0 3D shapes represented by manifold triangle meshes, which we apply to cell nuclei envelopes reconstructed from electron micrographs. The representation is robustly obtained through Spherical Harmonics coefficients over a spherical parameterization of the input mesh obtained through Willmore flow.
Our results show how our method significantly improves the state of the art in the classification of nuclear envelopes of rodent brain samples. Moreover, while our method is motivated by the analysis of specific biological shapes, the framework is of general use for the compact frequency encoding of any genus-0 surface. }, url = {https://www.crs4.it/vic/data/papers/cwmi2020-wish.pdf}, } @Article{Pintore:2020:SI3, idxkey = {TOP-THEME-INDOOR}, idxproject = {VIGECLAB,AMAC,TDM,EVOCATION}, author = {Giovanni Pintore and Claudio Mura and Fabio Ganovelli and Lizeth Fuentes-Perez and Renato Pajarola and Enrico Gobbetti}, title = {State-of-the-art in Automatic {3D} Reconstruction of Structured Indoor Environments}, journal = j-CG-FORUM, year = 2020, volume = {39}, number = {2}, pages = {667--699}, doi = {10.1111/cgf.14021}, abstract = { Creating high-level structured 3D models of real-world indoor scenes from captured data is a fundamental task which has important applications in many fields. Given the complexity and variability of interior environments and the need to cope with noisy and partial captured data, many open research problems remain, despite the substantial progress made in the past decade. In this survey, we provide an up-to-date integrative view of the field, bridging complementary views coming from computer graphics and computer vision. After providing a characterization of input sources, we define the structure of output models and the priors exploited to bridge the gap between imperfect sources and desired output. We then identify and discuss the main components of a structured reconstruction pipeline, and review how they are combined in scalable solutions working at the building level. We finally point out relevant research issues and analyze research trends. }, url = {https://www.crs4.it/vic/data/papers/eg2020-star-indoor.pdf}, } @Article{Diaz:2020:ISE, idxkey = {TOP-THEME-VOLUMETRIC}, idxproject = {TDM,VIGECLAB}, author = {Jose D{\'i}az and Fabio Marton and Enrico Gobbetti}, title = {Interactive Spatio-Temporal Exploration of Massive Time-Varying Rectilinear Scalar Volumes based on a Variable Bit-Rate Sparse Representation over Learned Dictionaries}, journal = {Computers \& Graphics}, year = 2020, volume = {88}, number = {}, pages = {45--56}, doi = {10.1016/j.cag.2020.03.002}, abstract = { We introduce a novel approach for supporting fully interactive non-linear spatio-temporal exploration of massive time-varying rectilinear scalar volumes on commodity platforms. To do this, we decompose each frame into an octree of overlapping bricks. Each brick is further subdivided into smaller non-overlapping blocks compactly approximated by quantized variable-length sparse linear combinations of prototype blocks stored in a learned data-dependent dictionary. An efficient tolerance-driven learning and approximation process, capable of computing the tolerance required to achieve a given frame size, exploits coresets and an incremental dictionary refinement strategy to cope with datasets made of thousands of multi-gigavoxel frames. The compressed representation of each frame is stored in a GPU-friendly format that supports direct adaptive streaming to the GPU with spatial and temporal random access, view-frustum and transfer-function culling, and transient and local decompression interleaved with ray-casting. Our variable-rate codec provides high-quality approximations at very low bit-rates, while offering real-time decoding performance.
Thus, the bandwidth provided by current commodity PCs proves sufficient to fully stream and render a working set of one gigavoxel per frame without relying on partial updates, thus avoiding any unwanted dynamic effects introduced by current incremental loading approaches. The quality and performance of our approach are demonstrated on massive time-varying datasets at the terascale. }, url = {https://www.crs4.it/vic/data/papers/cag2020-mtvplayer.pdf}, } @article{Boges:2020:VRF, idxkey = {}, idxproject = {}, author = {Daniya Boges and Marco Agus and Ronell Sicat and {Pierre J.} Magistretti and Markus Hadwiger and Corrado Cal\`i}, title = {Virtual reality framework for editing and exploring medial axis representations of nanometric scale neural structures}, journal = {Computers \& Graphics}, volume = {91}, pages = {12--24}, year = 2020, issn = {0097-8493}, doi = {10.1016/j.cag.2020.05.024}, abstract = {We present a novel virtual reality (VR) based framework for the exploratory analysis of nanoscale 3D reconstructions of cellular structures acquired from rodent brain samples through serial electron microscopy. The system is specifically targeted at medial axis representations (skeletons) of branched and tubular structures of cellular shapes, and it is designed to provide domain scientists with: (i) effective and fast semi-automatic interfaces for tracing skeletons directly on surface-based representations of cells and structures; (ii) fast tools for proofreading, i.e., correcting and editing semi-automatically constructed skeleton representations; and (iii) natural methods for interactive exploration, i.e., measuring, comparing, and analyzing geometric features related to cellular structures based on medial axis representations. Neuroscientists currently use the system for performing morphology studies on sparse reconstructions of glial cells and neurons extracted from a sample of the somatosensory cortex of a juvenile rat. The framework runs on a standard PC and has been tested on two different display and interaction setups: a PC-tethered stereoscopic head-mounted display (HMD) with 3D controllers and tracking sensors, and a large display wall with a standard gamepad controller. We report on a user study that we carried out for analyzing user performance on different tasks using these two setups.}, url = {https://www.sciencedirect.com/science/article/pii/S0097849320300789}, thumbnail = {https://www.crs4.it/vic/data/papers/cag2020-neurovr.jpg}, } @Article{Agus:2020:FSS, idxkey = {}, idxproject = {VIGECLAB}, author = {Marco Livesu and Massimiliano Corsini and Ruggero Pintus}, title = {Foreword to the Special Section on Smart Tools and Applications in Computer Graphics (STAG 2019)}, journal = {Computers \& Graphics}, year = 2020, volume = {83}, doi = {10.1016/j.cag.2020.05.027}, abstract = { This special issue contains extended and revised versions of the best papers presented at the 6th Smart Tools and Applications in Graphics (STAG 2019), held in Cagliari on 14–15 November 2019.
The three selected papers span different visual computing domains: (i) a framework for fully interactive non-linear spatio-temporal exploration of massive time-varying rectilinear scalar volumes on commodity platforms, (ii) a visualization system helping reviewers identify researchers working on a certain topic, analyze their contributions over time, and highlight co-authorship relations and conflicts, and (iii) a VR environment for editing and proofreading medial axis representations of nanoscale brain cell morphologies. }, url = {https://www.sciencedirect.com/science/article/abs/pii/S009784932030087X}, thumbnail = {https://www.crs4.it/vic/data/papers/cag2020-foreword.jpg}, } @TechReport{Bettio:2020:TVS, idxproject = {TDM}, author = {Fabio Bettio and Alberto {Jaspe Villanueva} and Enrico Gobbetti and Fabio Marton and Antonio Zorcolo and Gianmarco Cherchi and Riccardo Scateni}, title = {Tecnologie per la visualizzazione scalabile (versione preliminare)}, type = {Deliverable}, number = {D6.1}, institution = {TDM Project, RAS POR FESR 2014-2020}, year = 2019, abstract = { TDM is a collaborative project between CRS4 and the University of Cagliari that combines research, development, experimentation, and training activities in the field of urban informatics. The visualization work aims to facilitate the understanding of massive and complex data through visual methods. To this end, we focus on two distinct lines of activity. On the one hand, we create and validate web-based methods and applications for the interactive visualization of open data, making them available on the project portal. On the other hand, we advance the state of the art in the visualization of large data volumes through the study and development of new mathematical and computational methods. In this deliverable, we present the preliminary version of the visualization technologies developed in the project. The deliverable was completed during the period of restrictions imposed to counter the COVID-19 pandemic and, due to force majeure, it therefore does not cover the full integration with the project's sensing infrastructure. These aspects will be discussed in the next deliverable (D6.2). }, thumbnail = {https://www.crs4.it/vic/data/papers/tdm.jpg}, url = {https://www.crs4.it/vic/data/papers/TDM-D6_1-visualizzazione-2020-06.pdf}, } %################################ %### 2019 %################################ @Article{Cali:2019:3CR, idxkey = {}, idxproject = {VIGECLAB}, author = {Corrado Cal\`i and Marco Agus and Kalpana Kare and {Daniya J.} Boges and Heikki Lehvaslaiho and Markus Hadwiger and {Pierre J.} Magistretti}, title = {{3D} cellular reconstruction of cortical glia and parenchymal morphometric analysis from {Serial Block-Face Electron Microscopy} of juvenile rat}, journal = {Progress in Neurobiology}, year = 2019, volume = {}, number = {}, pages = {}, doi = {10.1016/j.pneurobio.2019.101696}, abstract = { With the rapid evolution in the automation of serial electron microscopy in life sciences, the acquisition of terabyte-sized datasets is becoming increasingly common. High resolution serial block-face imaging (SBEM) of biological tissues offers the opportunity to segment and reconstruct nanoscale structures to reveal spatial features previously inaccessible with simple, single section, two-dimensional images. In particular, we focussed here on glial cells, whose reconstruction efforts in the literature are still limited, compared to neurons.
We imaged a 750,000 cubic micron volume of the somatosensory cortex from a juvenile P14 rat, with 20 nm accuracy. We recognized a total of 186 cells using their nuclei, and classified them as neuronal or glial based on features of the soma and the processes. We reconstructed for the first time 4 almost complete astrocytes and neurons, 4 complete microglia and 4 complete pericytes, including their intracellular mitochondria, 186 nuclei and 213 myelinated axons. We then performed quantitative analysis on the three-dimensional models. From the data we generated, we observed that neurons have larger nuclei, which correlated with their lesser density, and that astrocytes and pericytes have a higher surface-to-volume ratio, compared to other cell types. All reconstructed morphologies represent an important resource for computational neuroscientists, as morphological quantitative information can be inferred to tune simulations that take into account the spatial compartmentalization of the different cell types. }, thumbnail = {https://www.crs4.it/vic/data/papers/pn2019-sbem.jpg}, } @proceedings{Agus:2019:STA, idxproject = {VIGEC}, editor = {Marco Agus and Massimiliano Corsini and Ruggero Pintus}, title = {Smart Tools and Apps for Graphics}, publisher = {Eurographics Association}, year = {2019}, isbn = {978-3-03868-100-7}, issn = {2617-4855}, thumbnail = {https://www.crs4.it/vic/data/papers/stag2019.jpg}, } @inproceedings{Diaz:2019:MIS, idxproject = {VIGECLAB,TDM}, author={Jose D{\'i}az and Fabio Marton and Enrico Gobbetti}, title={{MTV-Player}: Interactive Spatio-Temporal Exploration of Compressed Large-Scale Time-Varying Rectilinear Scalar Volumes}, booktitle={Proc. Smart Tools and Apps for Graphics}, month=nov, year=2019, doi = {10.2312/stag.20191358}, pages = {1--10}, abstract={ We present an approach for supporting fully interactive exploration of massive time-varying rectilinear scalar volumes on commodity platforms. We decompose each frame into a forest of bricked octrees. Each brick is further subdivided into smaller blocks, which are compactly approximated by quantized variable-length sparse linear combinations of prototype blocks stored in a data-dependent dictionary learned from the input sequence. This variable bit-rate compact representation, obtained through a tolerance-driven learning and approximation process, is stored in a GPU-friendly format that supports direct adaptive streaming to the GPU with spatial and temporal random access. An adaptive compression-domain renderer closely coordinates off-line data selection, streaming, decompression, and rendering. The resulting system provides total control over the spatial and temporal dimensions of the data, supporting the same exploration metaphor as traditional video players. Since we employ a highly compressed representation, the bandwidth provided by current commodity platforms proves sufficient to fully stream and render dynamic representations without relying on partial updates, thus avoiding any unwanted dynamic effects introduced by current incremental loading approaches. Moreover, our variable-rate encoding based on sparse representations provides high-quality approximations, while offering real-time decoding and rendering performance. The quality and performance of our approach are demonstrated on massive time-varying datasets at the terascale, which are nonlinearly explored at interactive rates on a commodity graphics PC.
}, url = {https://www.crs4.it/vic/data/papers/stag2019-mtvplayer.pdf}, note = {Best paper award} } @inproceedings{Cherchi:2019:MIS, idxproject = {TDM}, author={Gianmarco Cherchi and Luca Pitzalis and Giovanni {Laerte Frongia} and Riccardo Scateni}, title={The {Py3DViewer} Project: A {Python} Library for fast Prototyping in Geometry Processing}, booktitle={Proc. Smart Tools and Apps for Graphics}, month=nov, year=2019, doi = {10.2312/stag.20191374}, pages = {121--128}, abstract={ Research and prototyping are nowadays shifting towards languages that allow interactive execution and quick changes. Python is very widely used for rapid prototyping. We introduce Py3DViewer, a new Python library that allows researchers to quickly prototype geometry processing algorithms by interactively editing and viewing meshes. Polygonal and polyhedral meshes are both supported. The library is designed to be used in conjunction with Jupyter environments, which allow interactive Python code execution and data visualization in a browser, thus opening up the possibility of viewing a mesh while editing the underlying geometry and topology. }, thumbnail = {https://www.crs4.it/vic/data/papers/stag2019-py3dviewer.jpg}, } @inproceedings{Jaspe:2019:WME, idxproject = {VIGECLAB,TDM}, author={Alberto {Jaspe Villanueva} and Ruggero Pintus and Andrea Giachetti and Enrico Gobbetti}, title={Web-based Multi-layered Exploration of Annotated Image-based Shape and Material Models}, booktitle={The 16th Eurographics Workshop on Graphics and Cultural Heritage}, month=nov, year=2019, doi = {10.2312/gch.20191346}, pages = {33--42}, abstract={ We introduce a novel versatile approach for letting users explore detailed image-based shape and material models integrated with structured, spatially-associated descriptive information. We represent the objects of interest as a series of registered layers of image-based shape and material information. These layers are represented at multiple scales, and can come out of a variety of pipelines and include both RTI representations and spatially-varying normal and BRDF fields, possibly as a result of fusing multi-spectral data. An overlay image pyramid associates visual annotations with the various scales. The overlay pyramid of each layer can be easily authored at data preparation time using widely available image editing tools. At run-time, an annotated multi-layered dataset is made available to clients through a standard web server. Users can explore these datasets on a variety of devices, from mobile phones to large scale displays in museum installations, using JavaScript/WebGL2 clients capable of performing layer selection, interactive relighting and enhanced visualization, annotation display, and focus-and-context multiple-layer exploration using a lens metaphor. The capabilities of our approach are demonstrated on a variety of cultural heritage use cases involving different kinds of annotated surface and material models.
}, url = {https://www.crs4.it/vic/data/papers/gch2019-webviewer.pdf}, note = {Best paper award} } @inproceedings{Dulecha:2019:CDS, idxproject = {VIGECLAB,SCAN4RECO}, author={Tinsae Dulecha and Andrea Giachetti and Ruggero Pintus and Irina Ciortan and Alberto {Jaspe Villanueva} and Enrico Gobbetti}, title={Crack Detection in Single- and Multi-Light Images of Painted Surfaces using Convolutional Neural Networks}, booktitle={The 16th Eurographics Workshop on Graphics and Cultural Heritage}, month=nov, year=2019, doi = {10.2312/gch.20191347}, pages = {43--50}, abstract={ Cracks represent an imminent danger for painted surfaces that needs to be detected early, before it degenerates into more severe aging effects, such as color loss. Automatic detection of cracks in images of painted surfaces would therefore be extremely useful for art conservators; however, classical image processing solutions are not effective at detecting them and distinguishing them from other lines or surface characteristics. A possible solution to improve the quality of crack detection exploits Multi-Light Image Collections (MLICs), which are often acquired in the Cultural Heritage domain thanks to the diffusion of the Reflectance Transformation Imaging (RTI) technique, allowing a low-cost and rich digitization of artworks' surfaces. In this paper, we propose a pipeline for the detection of cracks on egg-tempera paintings from multi-light image acquisitions, which can also be used on single images. The method is based on single- or multi-light edge detection and on a custom Convolutional Neural Network able to classify image patches around edge points as crack or non-crack, trained on RTI data. The pipeline is able to classify regions with cracks with good accuracy when applied to MLICs. Used on single images, it still gives reasonable results. The analysis of the performance for different lighting directions also reveals optimal lighting directions. }, url = {https://www.crs4.it/vic/data/papers/gch2019-cracks.pdf}, } @InProceedings{Bettio:2019:TSA, idxproject = {TDM}, author = {Fabio Bettio and Giovanni Busonera and Marco Cogoni and Roberto Deidda and Mauro {Del Rio} and Massimo Gaggero and Enrico Gobbetti and Simone Leo and Simone Manca and Marino Marrocu and Luca Massidda and Fabio Marton and {Marco Enrico} Piras and Luca Pireddu and Gabriella Pusceddu and Alessandro Seoni and Gianluigi Zanetti}, title = {TDM: un sistema aperto per l'acquisizione di dati, l'analisi e la simulazione su scala metropolitana}, booktitle = {Proc. GARR 2019 - Selected Papers}, year = {2019}, doi = {10.26314/GARR-Conf19-proceedings-09}, pages = {44--49}, abstract = { The TDM project studies and develops new enabling technologies and vertical solutions in the Smart Cities domain, testing them in the metropolitan area of the City of Cagliari. The scalable design, the use of Smart City standards (OASC/FIWARE), and the realization of a device for the integration of heterogeneous sensors (the Edge Gateway) allow the solutions developed within the TDM project to be extended to other urban contexts. In this paper, we illustrate the general architecture of the system, the subsystem dedicated to sensing, and the first applications in the energy and weather-environmental domains.
}, keywords = {smart-cities, fiware, edge-computing, energy-awareness, nowcasting}, url = {https://www.crs4.it/vic/data/papers/garr2019-tdm.pdf}, } @Article{Pintore:2019:AMC, idxkey = {TOP-THEME-INDOOR}, idxproject = {VIGECLAB,TDM,AMAC}, author = {Giovanni Pintore and Fabio Ganovelli and Alberto {Jaspe Villanueva} and Enrico Gobbetti}, title = {Automatic modeling of cluttered multi-room floor plans from panoramic images}, journal = j-CG-FORUM, year = 2019, volume = {38}, number = {7}, pages = {347--358}, doi = {10.1111/cgf.13842}, abstract = { We present a novel and lightweight approach to capture and reconstruct structured 3D models of multi-room floor plans. Starting from a small set of registered panoramic images, we automatically generate a 3D layout of the rooms and of all the main objects inside. Such a 3D layout is directly suitable for use in a number of real-world applications, such as guidance, location, routing, or content creation for security and energy management. Our novel pipeline introduces several contributions to indoor reconstruction from purely visual data. In particular, we automatically partition panoramic images in a connectivity graph, according to the visual layout of the rooms, and exploit this graph to support object recovery and room boundary extraction. Moreover, we introduce a plane-sweeping approach to jointly reason about the content of multiple images and solve the problem of object inference in a top-down 2D domain. Finally, we combine these methods in a fully automated pipeline for creating a structured 3D model of a multi-room floor plan and of the location and extent of clutter objects. These contributions make our pipeline able to handle cluttered scenes with complex geometry that are challenging to existing techniques. The effectiveness and performance of our approach are evaluated on both real-world and synthetic models. }, url = {https://www.crs4.it/vic/data/papers/pg2019-clutteredfloorplans.pdf}, } @Article{Gobbetti:2019:CER, idxkey = {}, idxproject = {VIGECLAB,TDM}, author = {Enrico Gobbetti}, title = {Creation and Exploration of Reality-based Models}, journal = j-CG-FORUM, year = 2019, volume = {38}, number = {2}, pages = {xvii}, doi = {}, abstract = { The last two decades have seen impressive advances in computer vision, computer graphics, and user interface methods and technologies for creating and exploring high-quality 3D digital replicas of real-world objects. In this talk, I will reflect on the successes, limitations, and challenges of applying these research results in practice, with particular emphasis on the cultural heritage domain. I will also lay out research opportunities lying ahead (or behind us). }, thumbnail = {https://www.crs4.it/vic/data/papers/eg2019-keynote.jpg}, } @Article{Livesu:2019:FSS, idxkey = {}, idxproject = {VIGECLAB}, author = {Marco Livesu and Giovanni Pintore and Alberto Signoroni}, title = {Foreword to the Special Section on Smart Tools and Applications in Computer Graphics (STAG 2018)}, journal = {Computers \& Graphics}, year = 2019, volume = {82}, issn = {0097-8493}, doi = {10.1016/j.cag.2019.06.001}, url = {https://www.sciencedirect.com/science/article/pii/S0097849319300950}, abstract = { This special issue contains extended and revised versions of the best papers presented at the 5th Smart Tools and Applications in Graphics (STAG 2018).
}, thumbnail = {https://www.crs4.it/vic/data/papers/cag2019-foreword.jpg}, } @Article{Agus:2019:SA3, idxkey = {}, idxproject = {VIGECLAB}, author = {Marco Agus and Maria {Veloz Castillo} and Javier F. {Garnica Molina} and Enrico Gobbetti and Heikki {Lehvaslaiho} and Alex {Morales Tapia} and Pierre Magistretti and Markus Hadwiger and Corrado {Cal\`i}}, title = {Shape analysis of {3D} nanoscale reconstructions of brain cell nuclear envelopes by implicit and explicit parametric representations}, journal = {Computers \& Graphics}, year = 2019, volume = {}, number = {}, pages = {}, doi = {10.1016/j.cagx.2019.100004}, abstract = { Shape analysis of cell nuclei is becoming increasingly important in biology and medicine. Recent results have identified that large variability in shape and size of nuclei has an important impact on many biological processes. Current analysis techniques involve automatic methods for detection and segmentation of histology and microscopy images, but are mostly performed in 2D. Methods for 3D shape analysis, made possible by emerging acquisition methods capable of providing nanometric-scale 3D reconstructions, are still at an early stage, and often assume a simple spherical shape. We introduce here a framework for analyzing 3D nanoscale reconstructions of nuclei of brain cells (mostly neurons), obtained by semiautomatic segmentation of electron micrographs. Our method considers two parametric representations: the first customizes the implicit \textit{hyperquadrics} formulation and is particularly suited for convex shapes, while the second considers a \textit{spherical harmonics} decomposition of the explicit radial representation. Point clouds of nuclear envelopes, extracted from image data, are fitted to the parameterized models, which are then used for performing statistical analysis and shape comparisons. We report on the analysis of a collection of 121 nuclei of brain cells obtained from the somatosensory cortex of a juvenile rat. }, url = {https://www.crs4.it/vic/data/papers/cag2019-parabraincell.pdf}, } @Article{Pintus:2019:SMI, idxkey = {TOP-THEME-ACQUISITION}, idxproject = {VIGECLAB}, author = {Ruggero Pintus and Tinsae Dulecha and Irina Ciortan and Enrico Gobbetti and Andrea Giachetti}, title = {State-of-the-art in Multi-Light Image Collections for Surface Visualization and Analysis}, journal = j-CG-FORUM, year = 2019, volume = {38}, number = {3}, pages = {909--934}, doi = {10.1111/cgf.13732}, abstract = { Multi-Light Image Collections (MLICs), i.e., stacks of photos of a scene acquired with a fixed viewpoint and a varying surface illumination, provide large amounts of visual and geometric information. In this survey, we provide an up-to-date integrative view of MLICs as a means to gain insight into objects through the analysis and visualization of the acquired data. After a general overview of MLIC capturing and storage, we focus on the main approaches to produce representations usable for visualization and analysis. In this context, we first discuss methods for direct exploration of the raw data. We then summarize approaches that strive to emphasize shape and material details by fusing all acquisitions in a single enhanced image. Subsequently, we focus on approaches that produce relightable images through intermediate representations.
@Article{Agus:2019:IVV,
  idxkey = {TOP-THEME-VOLUMETRIC},
  idxproject = {VIGECLAB},
  author = {Marco Agus and Corrado Cal\`i and Ali Al-Awami and Enrico Gobbetti and Pierre Magistretti and Markus Hadwiger},
  title = {Interactive Volumetric Visual Analysis of Glycogen-derived Energy Absorption in Nanometric Brain Structures},
  journal = j-CG-FORUM,
  year = 2019,
  volume = {38},
  number = {3},
  pages = {427--439},
  doi = {10.1111/cgf.13700},
  abstract = { Digital acquisition and processing techniques are changing the way neuroscience investigation is carried out. Emerging applications range from statistical analysis on image stacks to complex connectomics visual analysis tools targeted at developing and testing hypotheses of brain development and activity. In this work, we focus on neuroenergetics, a field where neuroscientists analyze nanoscale brain morphology and relate energy consumption to glucose storage in the form of glycogen granules. In order to facilitate the understanding of neuroenergetic mechanisms, we propose a novel customized pipeline for the visual analysis of nanometric-level reconstructions based on electron microscopy image data. Our framework supports analysis tasks by combining (i) a scalable volume visualization architecture able to selectively render image stacks and corresponding labelled data, (ii) a method for highlighting distance-based energy absorption probabilities in the form of glow maps, and (iii) a hybrid connectivity-based and absorption-based interactive layout representation able to support queries for selective analysis of areas of interest and potential activity within the segmented datasets. This working pipeline is currently used in a variety of studies in the neuroenergetics domain. Here, we discuss a test case in which the framework was successfully used by domain scientists for the analysis of aging effects on glycogen metabolism, extracting knowledge from a series of nanoscale brain stacks of the rodent somatosensory cortex. },
  url = {https://www.crs4.it/vic/data/papers/ev2019-glyco.pdf},
}

@Article{Marton:2019:FGE,
  idxkey = {TOP-THEME-VOLUMETRIC},
  idxproject = {VIGECLAB,TDM},
  author = {Fabio Marton and Marco Agus and Enrico Gobbetti},
  title = {A framework for GPU-accelerated exploration of massive time-varying rectilinear scalar volumes},
  journal = j-CG-FORUM,
  year = 2019,
  volume = {38},
  number = {3},
  pages = {53--66},
  doi = {10.1111/cgf.13671},
  abstract = { We introduce a novel flexible approach to spatiotemporal exploration of rectilinear scalar volumes. Our out-of-core representation, based on per-frame levels of hierarchically tiled non-redundant 3D grids, efficiently supports spatiotemporal random access and streaming to the GPU in compressed formats. A novel low-bitrate codec, able to store into fixed-size pages a variable-rate approximation based on sparse coding with learned dictionaries, is exploited to meet stringent bandwidth constraints during time-critical operations, while a near-lossless representation is employed to support high-quality static frame rendering. A flexible high-speed GPU decoder and raycasting framework mixes and matches GPU kernels performing parallel object-space and image-space operations for seamless support, on fat and thin clients, of different exploration use cases, including animation and temporal browsing, dynamic exploration of single frames, and high-quality snapshots generated from near-lossless data. The quality and performance of our approach are demonstrated on large data sets with thousands of multi-billion-voxel frames. },
  url = {https://www.crs4.it/vic/data/papers/ev2019-gpudynadvr.pdf},
}
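%
% The fixed-size-page codec described in the entry above stores a
% variable-rate sparse-coding approximation of each voxel block. The greedy
% pursuit below is a simplified stand-in for such an encoder (dictionary
% learning, quantization, and paging are omitted); names and shapes are our
% own assumptions, not the paper's implementation.
%
%   import numpy as np
%
%   def encode_block(block, D, max_coeffs):
%       """Greedy (OMP-style) sparse coding of a flattened voxel block.
%
%       block: (M,) block values; D: (M, K) dictionary of unit-norm atoms;
%       max_coeffs: coefficient budget that fits the fixed-size page.
%       Returns (atom indices, coefficients) of the approximation.
%       """
%       block = block.astype(np.float64)
%       residual, chosen, coeffs = block.copy(), [], np.zeros(0)
%       for _ in range(max_coeffs):
%           k = int(np.argmax(np.abs(D.T @ residual)))  # best-correlated atom
%           if k in chosen:
%               break
%           chosen.append(k)
%           # Jointly re-fit all chosen coefficients on the original block.
%           coeffs, *_ = np.linalg.lstsq(D[:, chosen], block, rcond=None)
%           residual = block - D[:, chosen] @ coeffs
%       return np.array(chosen), coeffs
%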
@TechReport{Cabianca:2019:SAT,
  idxproject = {TDM},
  author = {Muriel Cabianca and Massimo Gaggero and Enrico Gobbetti and Simone Leo and Marino Marrocu and Marco-Enrico Piras and Luca Pireddu and Carlo Podda and Gabriella Pusceddu and Gianluigi Zanetti},
  title = {Sistema per l'aggregazione e trattamento big-data e diffusione open data (versione preliminare)},
  type = {Deliverable},
  number = {D3.3},
  institution = {TDM Project, RAS POR FESR 2014-2020},
  year = 2019,
  abstract = { TDM is a collaborative project between CRS4 and the University of Cagliari that aims to offer new smart solutions for increasing a city's attractiveness, improving resource management, and enhancing the safety and quality of life of citizens, through the study and development of enabling technologies and of innovative vertical solutions for protection from environmental risks, energy efficiency, and the fruition of cultural heritage. In this deliverable, we present a detailed description of the data processing and analysis block, as well as of the technological choices made to implement it. },
  thumbnail = {https://www.crs4.it/vic/data/papers/tdm.jpg},
  url = {https://www.crs4.it/vic/data/papers/TDM-D3_3-big_data-2019-06.pdf},
}

%################################
%### 2018
%################################

@TechReport{Bettio:2018:PDO,
  idxproject = {TDM},
  author = {Fabio Bettio and Giovanni Busonera and Mauro Delrio and Massimo Gaggero and Enrico Gobbetti and Carlo Impagliazzo and Giuditta Lecca and Gabriella Pusceddu and Simone Manca and Marino Marrocu and Luca Massidda and Gianluigi Zanetti},
  title = {Portale di distribuzione open data standardizzato ed accessibile},
  type = {Deliverable},
  number = {D3.2},
  institution = {TDM Project, RAS POR FESR 2014-2020},
  year = 2018,
  abstract = { The Open Data distribution portal is the primary mechanism for accessing the data produced by the TDM project. With this release (December 2018), we make available a fully accessible version of the portal, thus allowing technology transfer initiatives to start experimenting with TDM's Open Data interfaces and data. This release has mainly methodological aims: with it, the interfaces are switched on and a significant set of concrete examples of the types of data produced and exported by the project is provided. This data endowment will be progressively expanded as the work evolves. To describe the data, TDM uses the main standards and best practices relevant in the Smart Cities context, relying, for example, on the NGSI data format used by OASC/FIWARE for sensor data and on the international CF (Climate and Forecast) standard for the description of data coming from simulations and radar acquisitions. Together with the access portal, available at the URL https://data.tdm-project.it, and the related REST interface at https://rest.tdm-project.it, a portal with demonstrative applications illustrating the use of the data is made available at the URL https://demo.tdm-project.it },
  thumbnail = {https://www.crs4.it/vic/data/papers/tdm.jpg},
  url = {https://www.crs4.it/vic/data/papers/TDM-D3_2-portale_open_data-2018-12.pdf},
}
@TechReport{Gaggero:2018:OSR,
  idxproject = {TDM},
  author = {Massimo Gaggero and Giovanni Busonera and Mauro Delrio and Luca Massidda and Marino Marrocu and Enrico Gobbetti and Gianluigi Zanetti},
  title = {Reference design per la sensoristica diffusa},
  type = {Deliverable},
  number = {D3.1},
  institution = {TDM Project, RAS POR FESR 2014-2020},
  year = 2018,
  abstract = { One of the main objectives of the TDM project is to build a scalable architecture for the acquisition, integration, and analysis of data coming from heterogeneous sources, capable of handling the data generated by a large metropolitan area. Part of this data will come from sensors spread over the territory. The project includes the realization of a general-purpose reference design for the peripheral management of sensors and for the transmission of the signals they collect. This platform is called the Edge Gateway. This document describes the hardware/software architecture of the Edge Gateway and of its components in charge of acquiring and pre-processing the measurements gathered by the project's distributed sensors, and of forwarding them to the central collection, storage, and analysis system. An installation manual is also included in this deliverable. The Edge Gateway described here is of general use, and was specifically designed to be usable with FIWARE-based infrastructures other than TDM. It can also be used with other systems, such as the Amazon AWS, IBM Watson, and Microsoft Azure IoT clouds, to build OASC-compliant solutions. All the software produced is available on GitHub at: https://github.com/tdm-project. },
  thumbnail = {https://www.crs4.it/vic/data/papers/tdm.jpg},
  url = {https://www.crs4.it/vic/data/papers/TDM-D3_1-refdesign_sensoristica_diffusa-2018-06.pdf},
}
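%
% For illustration, a minimal sketch of how an edge gateway can forward a
% sensor reading as an NGSIv2-style entity over HTTP, loosely following the
% FIWARE conventions mentioned above. The endpoint, entity id scheme, and
% attribute layout are hypothetical; this is not code from the deliverable.
%
%   import json
%   import urllib.request
%
%   def publish_reading(base_url, station_id, temperature_c):
%       """POST one measurement to a context broker as an NGSIv2 entity."""
%       entity = {
%           "id": station_id,                      # hypothetical id scheme
%           "type": "WeatherObserved",
%           "temperature": {"type": "Number", "value": temperature_c},
%       }
%       req = urllib.request.Request(
%           url=f"{base_url}/v2/entities",
%           data=json.dumps(entity).encode("utf-8"),
%           headers={"Content-Type": "application/json"},
%           method="POST",
%       )
%       with urllib.request.urlopen(req) as resp:
%           return resp.status
%
%   # Example (hypothetical broker):
%   # publish_reading("http://localhost:1026", "urn:tdm:station:42", 21.5)
%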
@PhdThesis{Jaspe:2018:SE3,
  idxproject = {DIVA},
  author = {Alberto {Jaspe Villanueva}},
  title = {Scalable Exploration of {3D} Massive Models},
  school = {PhD Programme in Information and Communications Technology, University of A Coru{\~n}a, Spain},
  year = 2018,
  abstract = { This thesis introduces scalable techniques that advance the state-of-the-art in massive model creation and exploration. Concerning model creation, we present methods for improving reality-based scene acquisition and processing, introducing an efficient implementation of scalable out-of-core point clouds and a data-fusion approach for creating detailed colored models from cluttered scene acquisitions. The core of this thesis concerns enabling technology for the exploration of general large datasets. Two novel solutions are introduced. The first is an adaptive out-of-core technique exploiting the GPU rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels, opening the door to out-of-core ray tracing with shadowing and global illumination. The second is an aggressive compression method that exploits redundancy in large models to compress data so that it fits, in fully renderable format, in GPU memory. The method is targeted at voxelized representations of 3D scenes, which are widely used to accelerate visibility queries on the GPU. Compression is achieved by merging subtrees that are identical through a similarity transform and by exploiting the skewed distribution of references to shared nodes to store child pointers using a variable bit-rate encoding. The capability and performance of all methods are evaluated on many very massive real-world scenes from several domains, including cultural heritage, engineering, and gaming. },
  url = {https://www.crs4.it/vic/data/papers/2018-phd-jaspe-massive_3D_exploration.pdf},
}

@Article{Pintore:2018:3FP,
  idxkey = {TOP-THEME-INDOOR},
  idxproject = {3DCLOUDPRO,VIGEC},
  author = {Giovanni Pintore and Fabio Ganovelli and Ruggero Pintus and Roberto Scopigno and Enrico Gobbetti},
  title = {{3D} floor plan recovery from overlapping spherical images},
  journal = {Computational Visual Media},
  publisher = pub-SV,
  address = pub-SV:adr,
  year = 2018,
  volume = {4},
  number = {4},
  month = dec,
  pages = {367--383},
  doi = {10.1007/s41095-018-0125-9},
  abstract = { We present a novel approach to automatically recover, from a small set of partially overlapping spherical images, an indoor structure representation in terms of a 3D floor plan registered with a set of 3D environment maps. We introduce several improvements over previous approaches based on color/spatial reasoning exploiting \emph{Manhattan World} priors. In particular, we introduce a new method for geometric context extraction based on a 3D facets representation, which combines color distribution analysis of individual images with sparse multi-view clues. Moreover, we introduce an efficient method to combine the facets from different points of view in a single consistent model, considering the reliability of the facets' contributions. The resulting capture and reconstruction pipeline automatically generates 3D multi-room environments where most other previous approaches fail, such as in the presence of hidden corners and large clutter, even without involving additional dense 3D data or tools. We demonstrate the effectiveness and performance of our approach on different real-world indoor scenes. Our test data will be released to allow for further studies and comparisons. },
  thumbnail = {https://www.crs4.it/vic/data/papers/cvmj2018-3dfloorplan.jpg},
}

@Article{Pintore:2018:R3E,
  idxkey = {TOP-THEME-INDOOR},
  idxproject = {3DCLOUDPRO,VIGEC},
  author = {Giovanni Pintore and Ruggero Pintus and Fabio Ganovelli and Roberto Scopigno and Enrico Gobbetti},
  title = {Recovering {3D} existing-conditions of indoor structures from spherical images},
  journal = {Computers \& Graphics},
  publisher = pub-ELS,
  address = pub-ELS:adr,
  year = 2018,
  volume = {77},
  number = {},
  month = dec,
  pages = {16--29},
  doi = {10.1016/j.cag.2018.09.013},
  abstract = { We present a vision-based approach to automatically recover the 3D existing-conditions information of an indoor structure, starting from a small set of overlapping spherical images. The recovered 3D model includes the as-built 3D room layout with the position of important functional elements located on room boundaries. We first recover the underlying 3D structure as interconnected rooms bounded by walls. This is done by combining geometric reasoning under an Augmented Manhattan World model and Structure-from-Motion. Then, we create, from the original registered spherical images, 2D rectified and metrically scaled images of the room boundaries. Using those undistorted images and the associated 3D data, we automatically detect the 3D position and shape of relevant wall-, floor-, and ceiling-mounted objects, such as electric outlets, light switches, air vents, and light points. As a result, our system is able to quickly and automatically draft an as-built model coupled with its existing conditions using only commodity mobile devices. We demonstrate the effectiveness and performance of our approach on real-world indoor scenes and publicly available datasets. },
  url = {https://www.crs4.it/vic/data/papers/cag2018-existing_conditions.pdf},
}
@InProceedings{Agus:2018:HSA,
  idxproject = {VIGEC},
  author = {Marco Agus and C. Cal\`i and A. {Tapia Morales} and H.O. Lehvaslaiho and Pierre Magistretti and Enrico Gobbetti and Markus Hadwiger},
  title = {Hyperquadrics for shape analysis of {3D} nanoscale reconstructions of brain cell nuclear envelopes},
  booktitle = {Proc. Smart Tools and Apps for Graphics},
  month = oct,
  year = 2018,
  doi = {10.2312/stag.20181304},
  pages = {115--122},
  abstract = { Shape analysis of cell nuclei is becoming increasingly important in biology and medicine. Recent results have identified that the significant variability in shape and size of nuclei has an important impact on many biological processes. Current analysis techniques involve automatic methods for detection and segmentation of histology and microscopy images, and are mostly performed in 2D. Methods for 3D shape analysis, made possible by emerging acquisition methods capable of providing nanometric-scale 3D reconstructions, are, however, still at an early stage, and often assume a simple spherical shape. We introduce here a framework for analyzing 3D nanoscale reconstructions of nuclei of brain cells (mostly neurons), obtained by semiautomatic segmentation of electron micrographs. Our method considers an implicit parametric representation customizing the hyperquadrics formulation of convex shapes. Point clouds of nuclear envelopes, extracted from image data, are fitted to our parametrized model, which is then used for performing statistical analysis and shape comparisons. We report on the preliminary analysis of a collection of 92 nuclei of brain cells obtained from a sample of the somatosensory cortex of a juvenile rat. },
  url = {https://www.crs4.it/vic/data/papers/stag2018-hyperquadrics.pdf},
}
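%
% A minimal sketch of the hyperquadric formulation used above for convex
% shape fitting: the surface is the level set H(p) = 1 of a sum of powered
% plane distances. The parameter layout and residual function are our own
% illustration; the paper's actual fitting procedure is not reproduced.
%
%   import numpy as np
%
%   def hyperquadric(points, planes, exponents):
%       """Evaluate H(p) = sum_i |<n_i, p> + d_i|^g_i.
%
%       points:    (P, 3) sample positions.
%       planes:    (T, 4) rows (nx, ny, nz, d), one term per bounding plane.
%       exponents: (T,) positive exponents controlling edge sharpness.
%       H < 1 inside, H > 1 outside; H = 1 is the (convex) surface.
%       """
%       lin = points @ planes[:, :3].T + planes[:, 3]   # (P, T)
%       return (np.abs(lin) ** exponents).sum(axis=1)
%
%   def fit_residuals(envelope_points, planes, exponents):
%       """Residuals H(p) - 1 that a least-squares optimizer would drive
%       to zero when fitting a nuclear-envelope point cloud."""
%       return hyperquadric(envelope_points, planes, exponents) - 1.0
%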
@Proceedings{Livesu:2018:STA,
  idxproject = {VIGEC},
  editor = {Marco Livesu and Giovanni Pintore and Alberto Signoroni},
  title = {Smart Tools and Apps for Graphics},
  publisher = {Eurographics Association},
  year = {2018},
  isbn = {},
  thumbnail = {https://www.crs4.it/vic/data/papers/stag2018.jpg},
}

@InProceedings{Pintore:2018:R3I,
  idxproject = {VIGEC,3DCLOUDPRO},
  author = {Giovanni Pintore and Fabio Ganovelli and Ruggero Pintus and Roberto Scopigno and Enrico Gobbetti},
  title = {Recovering {3D} indoor floor plans by exploiting low-cost spherical photography},
  booktitle = {Pacific Graphics 2018 Short Papers},
  year = 2018,
  month = oct,
  pages = {45--48},
  ISSN = {},
  ISBN = {},
  DOI = {10.2312/pg.20181277},
  abstract = { We present a novel approach to automatically recover, from a small set of partially overlapping panoramic images, an indoor structure representation in terms of a 3D floor plan registered with a set of 3D environment maps. Our improvements over previous approaches include a new method for geometric context extraction based on a 3D facets representation, which combines color distribution analysis of individual images with sparse multi-view clues, as well as an efficient method to combine the facets from different points of view in the same world space, considering the reliability of the facets' contributions. The resulting capture and reconstruction pipeline automatically generates 3D multi-room environments where most other previous approaches fail, such as in the presence of hidden corners, large clutter, and sloped ceilings, even without involving additional dense 3D data or tools. We demonstrate the effectiveness and performance of our approach on different real-world indoor scenes. },
  url = {https://www.crs4.it/vic/data/papers/pg2018s-indoorplan.pdf},
}

@InProceedings{Pintus:2018:OSE,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Ruggero Pintus and Tinsae Dulecha and Alberto {Jaspe Villanueva} and Andrea Giachetti and Irina Ciortan and Enrico Gobbetti},
  title = {Objective and Subjective Evaluation of Virtual Relighting from Reflectance Transformation Imaging Data},
  booktitle = {The 15th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2018,
  month = oct,
  pages = {87--96},
  ISSN = {},
  ISBN = {},
  DOI = {10.2312/gch.20181344},
  abstract = { Reflectance Transformation Imaging (RTI) is widely used to produce relightable models from multi-light image collections. These models are used for a variety of tasks in the Cultural Heritage field. In this work, we carry out an objective and subjective evaluation of RTI data visualization. We start from the acquisition of a series of objects with different geometry and appearance characteristics using a common dome-based configuration. We then transform the acquired data into relightable representations using different approaches: PTM, HSH, and RBF. We then perform an objective error estimation by comparing ground truth images with relighted ones in a leave-one-out framework using PSNR and SSIM error metrics. Moreover, we carry out a subjective investigation through perceptual experiments involving end users with a variety of backgrounds. Objective and subjective tests are shown to behave consistently, and significant differences are found between the various methods. While the proposed analysis has been performed on three common and state-of-the-art RTI visualization methods, our approach is general enough to be extended and applied in the future to newly developed multi-light processing pipelines and rendering solutions, to assess their numerical precision and accuracy, and their perceptual visual quality. },
  thumbnail = {https://www.crs4.it/vic/data/papers/gch2018-rtieval.jpg},
}
@InProceedings{Ciortan:2018:APC,
  idxproject = {SCAN4RECO},
  author = {Irina Ciortan and Ruggero Pintus and Enrico Gobbetti and Andrea Giachetti},
  title = {Aging Prediction of Cultural Heritage Samples Based on Surface Microgeometry},
  booktitle = {The 15th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2018,
  month = oct,
  pages = {147--154},
  ISSN = {},
  ISBN = {},
  DOI = {10.2312/gch.20181352},
  abstract = { A critical and challenging aspect of the study of Cultural Heritage (CH) assets is related to the characterization of the materials that compose them and to the variation of these materials with time. In this paper, we exploit a realistic dataset of artificially aged metallic samples treated with different coatings commonly used for artworks' protection in order to evaluate different approaches to extract material features from high-resolution depth maps. In particular, on microprofilometric surface acquisitions of the samples performed at different aging steps, we estimated standard roughness descriptors used in materials science, as well as classical and recent image texture descriptors. We analyzed the ability of the features to discriminate different aging steps and performed supervised classification tests, showing the feasibility of a texture-based aging analysis and the effectiveness of coatings in reducing the surfaces' change with time. },
  thumbnail = {https://www.crs4.it/vic/data/papers/gch2018-aging.jpg},
}

@Article{Coggan:2018:PDS,
  idxkey = {},
  idxproject = {},
  author = {{Jay S.} Coggan and Corrado Cal\`i and Daniel Keller and Marco Agus and Daniya Boges and Marwan Abdellah and Kalpana Kare and Heikki Lehvaslaiho and Stefan Eilemann and {Renaud Blaise} Jolivet and Markus Hadwiger and Henry Markram and Felix Schuermann and {Pierre J.} Magistretti},
  title = {A Process for Digitizing and Simulating Biologically Realistic Oligocellular Networks Demonstrated for the Neuro-Glio-Vascular Ensemble},
  journal = {Frontiers in Neuroscience},
  publisher = {},
  address = {},
  year = 2018,
  volume = {12},
  number = {},
  month = aug,
  pages = {664},
  doi = {10.3389/fnins.2018.00664},
  abstract = { One will not understand the brain without an integrated exploration of structure and function, these attributes being two sides of the same coin: together they form the currency of biological computation. Accordingly, biologically realistic models require the re-creation of the architecture of the cellular components in which biochemical reactions are contained. We describe here a process of reconstructing a functional oligocellular assembly that is responsible for energy supply management in the brain and creating a computational model of the associated biochemical and biophysical processes. The reactions that underwrite thought are both constrained by and take advantage of brain morphologies pertaining to neurons, astrocytes and the blood vessels that deliver oxygen, glucose and other nutrients. Each component of this neuro-glio-vascular ensemble (NGV) carries out delegated tasks, as the dynamics of this system provide for each cell type its own energy requirements while including mechanisms that allow cooperative energy transfers. Our process for recreating the ultrastructure of cellular components and modeling the reactions that describe energy flow uses an amalgam of state-of-the-art techniques, including digital reconstructions of electron micrographs, advanced data analysis tools, computational simulations and in silico visualization software. While we demonstrate this process with the NGV, it is equally well adapted to any cellular system for integrating multimodal cellular data in a coherent framework. },
  url = {},
  thumbnail = {},
  note = {},
}
@Article{Agus:2018:GGL,
  idxkey = {},
  idxproject = {},
  author = {Marco Agus and Daniya Boges and Nicolas Gagnon and Pierre J. Magistretti and Markus Hadwiger and Corrado Cal\`i},
  title = {{GLAM}: Glycogen-derived Lactate Absorption Map for visual analysis of dense and sparse surface reconstructions of rodent brain structures on desktop systems and virtual environments},
  journal = {Computers \& Graphics},
  publisher = pub-ELS,
  address = pub-ELS:adr,
  year = 2018,
  volume = {74},
  number = {},
  month = aug,
  pages = {85--98},
  doi = {10.1016/j.cag.2018.04.007},
  abstract = { The human brain contains about one hundred billion neurons, but they cannot work properly without ultrastructural and metabolic support. For this reason, mammalian brains host another type of cells called “glial cells”, whose role is to maintain proper conditions for efficient neuronal function. One type of glial cell, the astrocyte, is involved in particular in the metabolic support of neurons, feeding them with lactate, a byproduct of glucose metabolism; astrocytes take up glucose from blood vessels and store it in another form, glycogen granules. These energy-storage molecules, whose morphology resembles spheres with diameters ranging roughly from 10 to 80 nanometers, can be easily recognized using electron microscopy, the only technique whose resolution is high enough to resolve them. Understanding and quantifying their distribution is of particular relevance for neuroscientists, in order to understand where and when neurons use energy in this form. To answer this question, we developed a visualization technique, dubbed GLAM (Glycogen-derived Lactate Absorption Map), customized for the analysis of the interaction of astrocytic glycogen with surrounding neurites, in order to formulate hypotheses on the energy absorption mechanisms. The method integrates high-resolution surface reconstructions of neurites, astrocytes, and the energy sources in the form of glycogen granules, obtained from different automated serial electron microscopy methods, like focused ion beam scanning electron microscopy (FIB-SEM) or serial block face electron microscopy (SBEM), together with an absorption map computed as a radiance transfer mechanism. The resulting visual representation provides an immediate and comprehensible illustration of the areas in which the probability of lactate shuttling is higher. The computed dataset can then be explored and quantified in a 3D space, either using 3D modeling software or virtual reality environments. Domain scientists have evaluated the technique by either using the computed maps for formulating functional hypotheses or for planning sparse reconstructions to avoid excessive occlusion. Furthermore, we conducted a pioneering user study showing that immersive VR setups can ease the investigation of the areas of interest and the analysis of the absorption patterns in the cellular structures. },
  url = {},
  note = {},
}
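%
% A minimal sketch in the spirit of the GLAM map described above: each
% surface vertex accumulates contributions from nearby glycogen granules
% with a radial fall-off kernel. The Gaussian kernel and all names are our
% own stand-ins for the paper's radiance-transfer formulation.
%
%   import numpy as np
%
%   def absorption_map(vertices, granules, falloff_radius):
%       """Per-vertex energy-absorption score from granule positions.
%
%       vertices: (V, 3) neurite mesh vertex positions.
%       granules: (G, 3) glycogen granule centers.
%       """
%       scores = np.zeros(len(vertices))
%       for g in granules:
%           d2 = ((vertices - g) ** 2).sum(axis=1)
%           scores += np.exp(-d2 / falloff_radius**2)
%       return scores / scores.max()   # normalized for color mapping
%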
@Article{Ciortan:2018:ASC,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Irina Ciortan and Tinsae Dulecha and Andrea Giachetti and Ruggero Pintus and Alberto {Jaspe Villanueva} and Enrico Gobbetti},
  title = {Artworks in the Spotlight: Characterization with a Multispectral Dome},
  journal = {IOP Conference Series: Materials Science and Engineering},
  volume = {364},
  number = {1},
  pages = {012025},
  year = 2018,
  doi = {10.1088/1757-899X/364/1/012025},
  abstract = { We describe the design and realization of a novel multispectral light dome system and the associated software control and calibration tools used to process the acquired data, in a specialized pipeline geared towards the analysis of shape and appearance properties of cultural heritage items. The current prototype dome, built using easily available electronic and lighting components, can illuminate a target of size 20cm x 20cm from 52 directions uniformly distributed in a hemisphere. From each illumination direction, 3 LED lights cover the visible range of the electromagnetic spectrum, as well as long ultraviolet and near infrared. A dedicated control system implemented on Arduino boards connected to a controlling PC fully manages all lighting and a camera to support automated acquisition. The controlling software also allows real-time adjustment of the LED settings, and provides a live view of the to-be-captured scene. We approach per-pixel light calibration by placing dedicated targets in the focal plane: four black reflective spheres for back-tracing the position of the LED lamps and a planar full-frame white paper to correct for the non-uniformity of radiance. Once light calibration is ensured, the multispectral acquisition of an artwork can be completed in a matter of minutes, resulting in a spot-wise appearance profile that stores at pixel level the per-frequency intensity value together with the light direction vector. By performing calibrated acquisition of multispectral Reflectance Transformation Imaging (RTI), with our analysis system it is possible to recover surface normals, to characterize the matte and specular behavior of materials, and to explore different surface layers thanks to UV-VIS-IR LED light separation. To demonstrate the system features, we present the outcomes of the on-site capture of metallic artworks at the National Archaeological Museum of Cagliari, Sardinia. },
  url = {https://www.crs4.it/vic/data/papers/heritech2018-dome.pdf},
}

@InProceedings{Assarsson:2018:VDM,
  idxproject = {TDM,VIGEC,VICLAB},
  author = {Ulf Assarsson and Markus Billeter and Dan Dolonius and Elmar Eisemann and Alberto {Jaspe Villanueva} and Leonardo Scandolo and Erik Sintorn},
  title = {Voxel DAGs and Multiresolution Hierarchies: From Large-Scale Scenes to Pre-computed Shadows},
  editor = {Tobias Ritschel and Alexandru Telea},
  booktitle = {Proc. EUROGRAPHICS Tutorials},
  year = 2018,
  month = apr,
  doi = {10.2312/egt.20181028},
  pages = {},
  abstract = { In this tutorial, we discuss voxel DAGs and multiresolution hierarchies, which are representations that can encode large volumes of data very efficiently. Despite the significant compression ratio, an advantage of these structures is that their content can be efficiently accessed in real time. This property enables various applications. We begin the tutorial by introducing the concepts of sparsity and of coherency in voxel structures, and explain how a directed acyclic graph (DAG) can be used to represent voxel geometry in a form that exploits both aspects, while remaining usable in its compressed form for, e.g., ray casting. In this context, we also discuss extensions that cover the time domain or consider advanced encoding strategies exploiting symmetries and entropy. We then move on to voxel attributes, such as colors, and explain how to integrate such information with the voxel DAGs. We will provide implementation details and present methods for efficiently constructing the DAGs, and also cover how to efficiently access the data structures with, e.g., GPU-based ray tracers. The course will be rounded off with a segment on applications. We highlight a few examples and show their results. Pre-computed shadows are a special application, which will be covered in detail. In this context, we also explain how some of the previous ideas contribute to multi-resolution hierarchies, which gives an outlook on the potential generality of the presented solutions. },
  thumbnail = {https://www.crs4.it/vic/data/papers/eg2018-tutorial-voxeldags.jpg},
}
@InCollection{Ciortan:2018:DMA,
  idxkey = {},
  idxproject = {SCAN4RECO},
  title = {A {DICOM}-Inspired Metadata Architecture for Managing Multimodal Acquisitions in Cultural Heritage},
  author = {{Irina-Mihaela} {Ciortan} and Ruggero Pintus and Giacomo Marchioro and Claudia Daffara and Enrico Gobbetti and Andrea Giachetti},
  booktitle = {Digital Cultural Heritage},
  series = ser-LNCS,
  volume = {10605},
  publisher = {Springer International Publishing},
  year = {2018},
  pages = {37--49},
  abstract = { Quantitative and qualitative analyses of cultural heritage (CH) assets need to interconnect individual pieces of information, including a variety of multimodal acquisitions, to form a holistic compounded view of studied objects. The need for joint acquisition brings with it the requirement for defining a protocol to store, structure and support the interoperability of the multisource data. In our work, we are performing multiple imaging studies in order to analyze the material, to monitor the behavior and to diagnose the status of CH objects. In particular, we employ, in addition to coarse 3D scanning, two high-resolution surface data capture techniques: reflectance transformation imaging and microprofilometry. Given this multivariate input, we have defined a hierarchical data organization, similar to the one used in the medical field by the Digital Imaging and Communications in Medicine (DICOM) protocol, that supports pre-alignment of local patches with respect to a global model. Furthermore, we have developed two supporting tools for multi-modal data handling: one for metadata annotation and another one for image registration. In this work, we illustrate our approach and discuss its practical application in a case study on a real CH object -- a bronze bas-relief. },
  url = {https://www.crs4.it/vic/data/papers/dch2018-dicom.pdf},
  doi = {10.1007/978-3-319-75826-8_4},
}
@Article{Giachetti:2018:NFH,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Andrea Giachetti and Irina Ciortan and Claudia Daffara and Giacomo Marchioro and Ruggero Pintus and Enrico Gobbetti},
  title = {A Novel Framework for Highlight Reflectance Transformation Imaging},
  journal = {Computer Vision and Image Understanding},
  volume = {168},
  number = {},
  pages = {118--131},
  year = 2018,
  doi = {10.1016/j.cviu.2017.05.014},
  issn = {1077-3142},
  abstract = { We propose a novel pipeline and related software tools for processing the multi-light image collections (MLICs) acquired in different application contexts to obtain shape and appearance information of captured surfaces, as well as to derive compact relightable representations of them. Our pipeline extends the popular Highlight Reflectance Transformation Imaging (H-RTI) framework, which is widely used in the Cultural Heritage domain. We support, in particular, perspective camera modeling, per-pixel interpolated light direction estimation, as well as light normalization correcting vignetting and uneven non-directional illumination. Furthermore, we propose two novel easy-to-use software tools to simplify all processing steps. The tools, in addition to supporting easy processing and encoding of pixel data, implement a variety of visualizations, as well as multiple reflectance-model-fitting options. Experimental tests on synthetic and real-world MLICs demonstrate the usefulness of the novel algorithmic framework and the potential benefits of the proposed tools for end-user applications. },
  url = {https://www.crs4.it/vic/data/papers/cviu2017-hrti.pdf},
}

%################################
%### 2017
%################################

@InProceedings{Agus:2017:MG2,
  idxproject = {TDM,VIGEC,VICLAB},
  author = {Marco Agus and Enrico Gobbetti and Fabio Marton and Giovanni Pintore and Pere-Pau V{\'a}zquez},
  title = {Mobile Graphics},
  booktitle = {SIGGRAPH Asia 2017 Courses},
  year = 2017,
  month = nov,
  isbn = {978-1-4503-5403-5},
  location = {Bangkok, Thailand},
  pages = {12:1--12:259},
  articleno = {12},
  numpages = {259},
  doi = {10.1145/3134472.3134483},
  abstract = { The increased availability and performance of mobile graphics terminals, including smartphones and tablets with high resolution screens and powerful GPUs, combined with the increased availability of high-speed mobile data connections, is opening the door to a variety of networked graphics applications. In this world, native apps or mobile sites coexist to reach the goal of providing us access to a wealth of multimedia information while we are on the move. This half-day course provides a technical introduction to the mobile graphics world spanning the hardware-software spectrum, and explores the state of the art and key advances in specific application domains, including capture and acquisition, real-time high-quality 3D rendering and interactive exploration. },
  url = {https://www.crs4.it/vic/data/papers/sa2017-course-mobile_graphics.pdf},
}

@InProceedings{Pintore:2017:MCR,
  idxproject = {VIGEC,VICLAB},
  author = {Giovanni Pintore and Fabio Ganovelli and Roberto Scopigno and Enrico Gobbetti},
  title = {Mobile metric capture and reconstruction in indoor environments},
  booktitle = {Proc. SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications},
  year = 2017,
  month = nov,
  pages = {1:1--1:5},
  abstract = { Mobile devices have become progressively more attractive for solving environment sensing problems. Thanks to their multi-modal acquisition capabilities and their growing processing power, they can perform increasingly sophisticated computer vision and data fusion tasks. In this context, we summarize our recent advances in the acquisition and reconstruction of indoor structures, describing the evolution of the methods from current single-view approaches to novel mobile multi-view methodologies. Starting from an overview of the features and capabilities of current hardware (ranging from commodity smartphones to recent 360 degree cameras), we present in detail specific real-world cases that exploit modern devices to acquire structural, visual, and metric information. },
  isbn = {978-1-4503-5410-3/17/11},
  doi = {10.1145/3132787.3139202},
  url = {https://www.crs4.it/vic/data/papers/sa17smgia-mobile-capture-indoor.pdf},
}
@InProceedings{Giachetti:2017:MRA,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Andrea Giachetti and Irina Ciortan and Claudia Daffara and Ruggero Pintus and Enrico Gobbetti},
  title = {Multispectral RTI Analysis of Heterogeneous Artworks},
  booktitle = {The 14th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2017,
  month = sep,
  pages = {19--28},
  ISSN = {2312-6124},
  ISBN = {978-3-03868-037-6},
  DOI = {10.2312/gch.20171288},
  abstract = { We propose a novel multi-spectral reflectance transformation imaging (MS-RTI) framework for the acquisition and direct analysis of the reflectance behavior of heterogeneous artworks. Starting from free-form acquisitions, we compute per-pixel calibrated multi-spectral appearance profiles, which associate a reflectance value to each sampled light direction and frequency. Visualization, relighting, and feature extraction are performed directly on appearance profile data, applying scattered data interpolation based on Radial Basis Functions to estimate per-pixel reflectance from novel lighting directions. We demonstrate how the proposed solution can convey more insight into the object materials and geometric details than classical multi-light methods that rely on low-frequency analytical model fitting, possibly mixed with a separate handling of high-frequency components, hence requiring constraining priors on material behavior. The flexibility of our approach is illustrated on two heterogeneous case studies, a painting and a dark shiny metallic sculpture, that showcase feature extraction, visualization, and analysis of high-frequency properties of artworks using multi-light, multi-spectral (Visible, UV and IR) acquisitions. },
  url = {https://www.crs4.it/vic/data/papers/gch2017-msrtianalysis.pdf},
  note = {Best paper award}
}

@InProceedings{Pintus:2017:GRM,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Ruggero Pintus and Andrea Giachetti and Giovanni Pintore and Enrico Gobbetti},
  title = {Guided Robust Matte-Model Fitting for Accelerating Multi-light Reflectance Processing Techniques},
  booktitle = {Proc. British Machine Vision Conference},
  year = 2017,
  month = sep,
  pages = {32.1--32.15},
  doi = {10.5244/C.31.32},
  abstract = { The generation of a basic matte model is at the core of many multi-light reflectance processing approaches, such as Photometric Stereo or Reflectance Transformation Imaging. To recover information on objects' shape and appearance, the matte model is used directly or combined with specialized methods for modeling high-frequency behaviors. Multivariate robust regression offers a general solution to reliably extract the matte component when source data is heavily contaminated by shadows, inter-reflections, specularity, or noise. However, robust multivariate modeling is usually very slow. In this paper, we accelerate robust fitting by drastically reducing the number of tested candidate solutions using a guided approach. Our method propagates already-known solutions to nearby pixels using a similarity-driven flood-fill strategy, and exploits this knowledge to order possible candidate solutions and to determine convergence conditions. The method has been tested on objects with a variety of reflectance behaviors, showing state-of-the-art accuracy with respect to current solutions, and a significant speed-up, without accuracy reduction, with respect to multivariate robust regression. },
  url = {https://www.crs4.it/vic/data/papers/bmvc2017-guidedrobustfitting.pdf},
}
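%
% The per-pixel robust core accelerated by the guided approach above can be
% sketched as an iteratively trimmed least-squares Lambertian fit; the paper
% instead uses full multivariate robust regression, seeded from
% already-fitted neighbors via flood fill. Names and thresholds here are our
% own illustrative choices.
%
%   import numpy as np
%
%   def trimmed_matte_fit(L, obs, keep=0.7, iters=5):
%       """Robust per-pixel matte fit, obs_i ~ <x, l_i> with x = albedo*normal.
%
%       L: (N, 3) light directions; obs: (N,) observed intensities.
%       Refits on the best-matching subset of lights, discarding likely
%       shadows and highlights.
%       """
%       idx = np.arange(len(obs))
%       for _ in range(iters):
%           x, *_ = np.linalg.lstsq(L[idx], obs[idx], rcond=None)
%           r = np.abs(obs - L @ x)
%           idx = np.argsort(r)[: int(keep * len(obs))]  # keep small residuals
%       return x
%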
@InProceedings{Pintus:2017:AAM,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Ruggero Pintus and Ying Yang and Holly Rushmeier and Enrico Gobbetti},
  title = {Automatic Algorithms for Medieval Manuscript Analysis},
  editor = {},
  booktitle = {Proc. 18th International Graphonomics Society Conference},
  year = 2017,
  month = jun,
  doi = {},
  pages = {},
  abstract = { Massive digital acquisition and preservation of deteriorating historical and artistic documents is of particular importance due to their value and fragile condition. The study and browsing of such digital libraries are invaluable for scholars in the Cultural Heritage field, but require automatic tools for analyzing and indexing these datasets. We will describe a set of completely automatic solutions to estimate per-page text leading, to extract text lines, blocks, and other layout elements, and to perform query-by-example word-spotting on medieval manuscripts. These techniques have been evaluated on a huge heterogeneous corpus of illuminated medieval manuscripts of different writing styles, languages, image resolutions, amounts of illumination and ornamentation, and levels of conservation, with various problematic issues such as holes, spots, ink bleed-through, ornamentation, and background noise. We also present a quantitative analysis to better assess the quality of the proposed algorithms. By not requiring any human intervention to produce a large amount of annotated training data, the developed methods provide Computer Vision researchers and Cultural Heritage practitioners with a compact and efficient system for document analysis. },
  url = {https://www.crs4.it/vic/data/papers/igs2017-manuscripts.pdf},
}

@Article{Agus:2017:DAV,
  idxkey = {TOP-THEME-UI},
  idxproject = {TDM,MONTEPRAMA4,VIGEC,VICLAB},
  author = {Marco Agus and Fabio Marton and Fabio Bettio and Markus Hadwiger and Enrico Gobbetti},
  title = {Data-driven analysis of virtual {3D} exploration of a large sculpture collection in real-world museum exhibitions},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  volume = {11},
  number = {1},
  month = dec,
  pages = {2:1--2:20},
  year = 2017,
  doi = {10.1145/3099618},
  abstract = { We analyze the use of an interactive system for the exploration of highly detailed 3D models of a collection of protohistoric Mediterranean sculptures. In this system, when the object of interest is selected, its detailed 3D model and associated information are presented at high resolution on a large display controlled by a touch-enabled horizontal surface at a suitable distance. The user interface combines an object-aware interactive camera controller with an interactive point-of-interest selector, and is implemented within a scalable system based on multiresolution structures shared between the rendering and user interaction subsystems. The system was installed in several temporary and permanent exhibitions, and was extensively used by tens of thousands of visitors. We provide a data-driven analysis of usage experience based on logs gathered during a 27-month period at four exhibitions in archaeological museums, for a total of more than 75K exploration sessions. We focus on discerning the main visitor behaviors during 3D exploration by employing tools for deriving interest measures on surfaces, and tools for clustering and knowledge discovery from high-dimensional data. The results highlight the main trends in visitor behavior during the interactive sessions. These results provide useful insights for the design of 3D exploration user interfaces in future digital installations. },
  url = {https://www.crs4.it/vic/data/papers/jocch2017-exploration-analysis.pdf},
}
@InProceedings{Agus:2017:MG,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Marco Agus and Enrico Gobbetti and Fabio Marton and Giovanni Pintore and Pere-Pau V{\'a}zquez},
  title = {Mobile Graphics},
  editor = {Adrien Bousseau and Diego Gutierrez},
  booktitle = {Proc. EUROGRAPHICS Tutorials},
  year = 2017,
  month = apr,
  doi = {10.2312/egt.20171032},
  pages = {},
  abstract = { The increased availability and performance of mobile graphics terminals, including smartphones and tablets with high resolution screens and powerful GPUs, combined with the increased availability of high-speed mobile data connections, is opening the door to a variety of networked graphics applications. In this world, native apps or mobile sites coexist to reach the goal of providing us access to a wealth of multimedia information while we are on the move. This half-day tutorial provides a technical introduction to the mobile graphics world spanning the hardware-software spectrum, and explores the state of the art and key advances in specific application domains, including capture and acquisition, real-time high-quality 3D rendering and interactive exploration. },
  url = {https://www.crs4.it/vic/data/papers/eg2017-tutorial-mobile-graphics.pdf},
}

@Article{Jaspe:2017:SSV,
  idxkey = {TOP-THEME-MASSIVE-MODELS},
  idxproject = {DIVA,VIGEC},
  author = {Alberto {Jaspe Villanueva} and Fabio Marton and Enrico Gobbetti},
  title = {{Symmetry-aware Sparse Voxel DAGs} ({SSVDAGs}) for compression-domain tracing of high-resolution geometric scenes},
  journal = {Journal of Computer Graphics Techniques},
  volume = {6},
  number = {2},
  pages = {1--30},
  year = 2017,
  issn = {2331-7418},
  abstract = { Voxelized representations of complex 3D scenes are widely used nowadays to accelerate visibility queries in many GPU rendering techniques. Since GPU memory is limited, it is important that these data structures can be kept within a strict memory budget. Recently, directed acyclic graphs (DAGs) have been successfully introduced to compress sparse voxel octrees (SVOs), but they are limited to sharing identical regions of space. In this paper, we show that a more efficient lossless compression of geometry can be achieved, while keeping the same visibility-query performance, by merging subtrees that are identical through a similarity transform, and by exploiting the skewed distribution of references to shared nodes to store child pointers using a variable bit-rate encoding. We also describe how, by selecting plane reflections along the main grid directions as symmetry transforms, we can construct highly compressed GPU-friendly structures using a fully out-of-core method. Our results demonstrate that state-of-the-art compression and real-time tracing performance can be achieved on high-resolution voxelized representations of real-world scenes of very different characteristics, including large CAD models, 3D scans, and typical gaming models, leading, for instance, to real-time GPU in-core visualization with shading and shadows of the full Boeing 777 at sub-millimetric precision. This article is based on an earlier work: \textit{SSVDAGs: Symmetry-aware Sparse Voxel DAGs, in Proceedings of the 20th ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (c) ACM, 2016. 10.1145/2856400.2856420.} We include here a more thorough exposition, a description of alternative construction and tracing methods, as well as additional results. In order to facilitate understanding, evaluation and extensions, the full source code of the method is provided as accompanying material. },
  url = {https://www.crs4.it/vic/data/papers/jcgt2017-ssvdags.pdf},
}
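%
% A minimal hash-consing sketch of the SVO-to-DAG reduction underlying the
% entry above: identical subtrees are merged bottom-up so that each unique
% subtree is stored once. The symmetry transforms and variable bit-rate
% child pointers of the actual method are only noted in the comments.
%
%   def build_dag(node, pool):
%       """Deduplicate a sparse voxel octree into a DAG, bottom-up.
%
%       node: tuple of 8 children (None for empty, nested tuples otherwise);
%       pool: dict mapping a child-pointer signature to its unique node.
%       SSVDAGs additionally try mirrored versions of each subtree and keep
%       a canonical one, then encode frequent child pointers with fewer bits.
%       """
%       if node is None:
%           return None
%       children = tuple(build_dag(c, pool) for c in node)
%       key = tuple(id(c) if c is not None else 0 for c in children)
%       if key not in pool:          # first occurrence of this subtree
%           pool[key] = children     # every later occurrence is shared
%       return pool[key]
%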
@InBook{Pintus:2017:TSC,
  idxproject = {DMP,SCAN4RECO,VIGEC},
  author = {Ruggero Pintus and Enrico Gobbetti and Marco Callieri and Matteo Dellepiane},
  title = {Techniques for seamless color registration and mapping on dense {3D} models},
  editor = {Nicola Masini and Francesco Soldovieri},
  booktitle = {Sensing the Past: From artifact to historical site},
  publisher = {Springer},
  month = {},
  year = {2017},
  pages = {355--376},
  isbn = {978-3-319-50518-3},
  doi = {10.1007/978-3-319-50518-3_17},
  abstract = { Today's most widely used 3D digitization approach is a combination of active geometric sensing, mainly using laser scanning, with active or passive color sensing, mostly using digital photography. Producing a seamless colored object, starting from a geometric representation and a set of photographs, is a data fusion problem requiring effective solutions for image-to-geometry registration, and color mapping and blending. This chapter provides a brief survey of the state-of-the-art solutions, ranging from manual approaches to fully scalable automated methods. },
  url = {https://www.crs4.it/vic/data/papers/stp2017-color_registration_mapping.pdf},
}

@Article{Yang:2017:ASP,
  idxproject = {VIGEC},
  author = {Ying Yang and Ruggero Pintus and Enrico Gobbetti and Holly Rushmeier},
  title = {Automatic Single Page-based Algorithms for Medieval Manuscript Analysis},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  volume = {10},
  number = {2},
  pages = {9:1--9:22},
  year = 2017,
  doi = {10.1145/2996469},
  abstract = { We propose three automatic algorithms for analyzing digitized medieval manuscripts: text block computation, text line segmentation, and special component extraction, by taking advantage of previous clustering algorithms and a template matching technique. These three methods are completely automatic, so that no user intervention or input is required to make them work. Moreover, they are all per-page based; that is, unlike some prior methods, which need a set of pages from the same manuscript for training purposes, they are able to analyze a single page without requiring any additional pages for input, eliminating the need for training on additional pages with similar layout. We extensively evaluated the algorithms on 1771 images of pages of 6 different publicly available historical manuscripts, which differ significantly from each other in terms of layout structure, acquisition resolution, and writing style. The experimental results indicate that they are able to achieve very satisfactory performance; e.g., the average precision and recall values obtained by the text block computation method reach as high as 98\% and 99\%, respectively. },
  url = {https://www.crs4.it/vic/data/papers/jocch2017-autosingle-manuscript-analysis.pdf},
}
@Article{Yang:2017:3SA,
  idxproject = {VIGEC},
  author = {Ying Yang and Ruggero Pintus and Holly Rushmeier and Ioannis Ivrissimtzis},
  title = {A {3D} Steganalytic Algorithm and Steganalysis-Resistant Watermarking},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = 2017,
  volume = {23},
  number = {2},
  month = feb,
  pages = {1002--1013},
  doi = {10.1109/TVCG.2016.2525771},
  abstract = { We propose a simple yet efficient steganalytic algorithm for watermarks embedded by two state-of-the-art 3D watermarking algorithms by Cho et al. The main observation is that while in a clean model the means/variances of Cho et al.'s normalized histogram bins are expected to follow a Gaussian distribution, in a marked model their distribution will be bimodal. The proposed algorithm estimates the number of bins through an exhaustive search, and the presence of a watermark is then decided by a tailor-made normality test or a t-test. We also propose a modification of Cho et al.'s watermarking algorithms, with the watermark embedded by changing the histogram of the radial coordinates of the vertices. Rather than targeting a continuous statistic, such as the mean or variance of the values in a bin, the proposed watermarking modifies a discrete statistic, which here is the height of the histogram bin, to achieve watermark embedding. Experimental results demonstrate that the modified algorithm offers not only better resistance against the steganalytic attack we developed, but also an improved robustness/capacity trade-off. },
  url = {https://www.crs4.it/vic/data/papers/tvcg2017-3dsteganalysis_and_watermarking.pdf},
}
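%
% The detection idea above can be sketched as a stock normality test on the
% per-bin means of the normalized radial-coordinate histogram: roughly
% Gaussian in a clean model, bimodal after Cho et al.-style embedding. The
% paper searches over the unknown bin count and uses a tailor-made test;
% the Shapiro-Wilk test below is a simplified stand-in.
%
%   import numpy as np
%   from scipy import stats
%
%   def looks_marked(bin_means, alpha=0.05):
%       """Flag a mesh as suspicious if bin means deviate from normality."""
%       _, p_value = stats.shapiro(np.asarray(bin_means))
%       return p_value < alpha   # normality rejected -> possibly watermarked
%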
@Article{Diaz:2017:ESE,
  idxkey = {},
  idxproject = {DIVA},
  author = {Jose D{\'i}az and Timo Ropinski and Isabel Navazo and Enrico Gobbetti and Pere-Pau V{\'a}zquez},
  title = {An experimental study on the effects of shading in {3D} perception of volumetric models},
  journal = {The Visual Computer},
  volume = {33},
  number = {1},
  month = jan,
  pages = {47--61},
  year = 2017,
  doi = {10.1007/s00371-015-1151-6},
  issn = {1432-2315},
  abstract = { Throughout the years, many shading techniques have been developed to improve the conveying of information in volume visualization. Some of these methods, usually referred to as realistic, are supposed to provide better cues for the understanding of volume data sets. While shading approaches are heavily exploited in traditional monoscopic setups, no previous study has analyzed the effect of these techniques in virtual reality. To further explore the influence of shading on the understanding of volume data in such environments, we carried out a user study in a desktop-based stereoscopic setup. The goals of the study were to investigate the impact of well-known shading approaches and the influence of real illumination on depth perception. Participants had to perform three different perceptual tasks when exposed to static visual stimuli. Forty-five participants took part in the study, giving us 1152 trials for each task. Results show that advanced shading techniques improve depth perception in stereoscopic volume visualization. Moreover, external lighting does not affect depth perception when these shading methods are applied. As a result, we derive some guidelines that may help researchers when selecting illumination models for stereoscopic rendering. },
  url = {https://www.crs4.it/vic/data/papers/tvc2015-perceptual.pdf},
}

%################################
%### 2016
%################################

@InProceedings{Pintore:2016:MRE,
  idxproject = {VASCO,VIGEC,VICLAB},
  author = {Giovanni Pintore and Fabio Ganovelli and Enrico Gobbetti and Roberto Scopigno},
  title = {Mobile reconstruction and exploration of indoor structures exploiting omnidirectional images},
  booktitle = {Proc. SIGGRAPH Asia Mobile Graphics and Interactive Applications},
  year = 2016,
  month = dec,
  pages = {1:1--1:4},
  abstract = { We summarize our recent advances in acquisition, reconstruction, and exploration of indoor environments with the aid of mobile devices. Our methods enable casual users to quickly capture and recover multi-room structures coupled with their visual appearance, starting from panorama images generated with the built-in capabilities of modern mobile devices, as well as emerging low-cost 360$^\circ$ cameras. After introducing the reconstruction algorithms at the base of our approach, we show how to build applications able to generate 3D floor plans scaled to their real-world metric dimensions and capable of managing scenes not necessarily limited by \emph{Manhattan World} assumptions. Then, exploiting the resulting structural and visual model, we propose a client-server interactive exploration system implementing a low-DOF navigation interface, specifically developed for touch interaction on smartphones and tablets. },
  isbn = {978-1-4503-4551-4/16/12},
  doi = {10.1145/2999508.2999526},
  url = {https://www.crs4.it/vic/data/papers/sa16smgia-mobile-indoor.pdf},
}

@Proceedings{Pintore:2016:STA,
  editor = {Giovanni Pintore and Filippo Stanco},
  title = {Smart Tools and Apps for Graphics},
  publisher = {Eurographics Association},
  year = {2016},
  isbn = {},
  thumbnail = {https://www.crs4.it/vic/data/papers/stag2016.jpg},
}

@InProceedings{Pintus:2016:PFR,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Ruggero Pintus and Irina Ciortan and Andrea Giachetti and Enrico Gobbetti},
  title = {Practical Free-form {RTI} Acquisition with Local Spot Lights},
  booktitle = {Proc. Smart Tools and Apps for Graphics},
  month = oct,
  year = 2016,
  doi = {10.2312/stag.20161374},
  pages = {},
  abstract = { We present an automated light calibration pipeline for free-form acquisition of the shape and reflectance of objects using common off-the-shelf illuminators, such as LED lights, that can be placed arbitrarily close to the objects. We acquire multiple digital photographs of the studied object shot from a stationary camera. In each photograph, a light is freely positioned around the object in order to cover a wide variety of illumination directions. While common free-form acquisition approaches are based on the simplifying assumptions that the light sources are either sufficiently far from the object that all incoming light can be modeled using parallel rays, or that lights are local points emitting uniformly in space, we use the more realistic model of a scene lit by a moving local spot light with a fall-off depending on the cosine of the angle between the spot light optical axis and the illumination direction, raised to the power of the spot exponent. We recover all spot light parameters using a multipass numerical method. First, light positions are determined using standard methods used in photometric stereo approaches. Then, we exploit measurements taken on a planar Lambertian reference object to recover the spot light exponent and the per-image spot light optical axis; we minimize the difference between the observed reflectance and the reflectance synthesized by using the near-field Lambertian equation. The optimization is performed in two passes, first generating a starting solution and then refining it using a Levenberg-Marquardt iterative minimizer. We demonstrate the effectiveness of the method through an error analysis performed on analytical datasets, as well as through real-world experiments. },
  url = {https://www.crs4.it/vic/data/papers/stag2016-spotrti.pdf},
}
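%
% The spot-light model described above can be written down directly; the
% sketch below evaluates the light-side term (cosine fall-off around the
% spot axis raised to the spot exponent, with near-field 1/d^2 attenuation).
% The surface cosine and albedo of the full near-field Lambertian equation
% are omitted, and names are our own.
%
%   import numpy as np
%
%   def spot_irradiance(p, light_pos, spot_axis, spot_exp):
%       """Relative irradiance at point p from a local spot light."""
%       to_p = p - light_pos
%       d = np.linalg.norm(to_p)
%       cos_spot = np.dot(to_p / d, spot_axis / np.linalg.norm(spot_axis))
%       return max(cos_spot, 0.0) ** spot_exp / d**2
%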
While common free-form acquisition approaches rely on the simplifying assumption that the light sources are either sufficiently far from the object that all incoming light can be modeled using parallel rays, or that lights are local points emitting uniformly in space, we use the more realistic model of a scene lit by a moving local spot light whose fall-off depends on the cosine of the angle between the spot light optical axis and the illumination direction, raised to the power of the spot exponent. We recover all spot light parameters using a multipass numerical method. First, light positions are determined using standard methods from photometric stereo approaches. Then, we exploit measurements taken on a Lambertian planar reference object to recover the spot light exponent and the per-image spot light optical axis; we minimize the difference between the observed reflectance and the reflectance synthesized using the near-field Lambertian equation. The optimization is performed in two passes, first generating a starting solution and then refining it using a Levenberg-Marquardt iterative minimizer. We demonstrate the effectiveness of the method through an error analysis performed on analytical datasets, as well as through real-world experiments.},
  url = {https://www.crs4.it/vic/data/papers/stag2016-spotrti.pdf},
}

@InProceedings{Gobbetti:2016:CVC,
  idxproject = {VIGEC},
  author = {Enrico Gobbetti and Marco Agus and Fabio Bettio and Alberto {Jaspe Villanueva} and Fabio Marton and Ruggero Pintus and Giovanni Pintore and Antonio Zorcolo},
  title = {CRS4 Visual Computing},
  booktitle = {STAG 2016 Lab Presentations},
  year = 2016,
  month = oct,
  pages = {},
  abstract = {This lab presentation briefly describes the Visual Computing group of the CRS4 research center. Established in 1996, the group primarily focuses on the study, development, and application of technology for the acquisition, storage, processing, distribution, and interactive exploration of complex objects and environments. Research is widely published in major journals and conferences, and many of the developed technologies are used (or have been used) in real-world applications as diverse as internet geoviewing, scientific data analysis, surgical training, and cultural heritage study and valorization.},
  url = {https://www.crs4.it/vic/data/papers/stag2016-crs4viclab.pdf},
  note = {On USB stick only},
}

@InProceedings{Garro:2016:FMA,
  idxproject = {VASCO,VIGEC,VICLAB},
  author = {Valeria Garro and Giovanni Pintore and Fabio Ganovelli and Enrico Gobbetti and Roberto Scopigno},
  title = {Fast Metric Acquisition with Mobile Devices},
  booktitle = {Proc. 21st International Workshop on Vision, Modeling and Visualization (VMV)},
  year = 2016,
  month = oct,
  pages = {29--36},
  doi = {10.2312/vmv.20161339},
  abstract = {We present a novel algorithm for fast metric reconstruction on mobile devices using a combination of image and inertial acceleration data. In contrast to previous approaches to this problem, our algorithm does not require a long acquisition time or intensive data processing, and can be implemented entirely on common IMU-enabled tablets and smartphones. The method recovers real-world units by comparing the acceleration values from the inertial sensors with the ones inferred from images. In order to cope with IMU signal noise, we propose a novel RANSAC-like strategy which helps to remove the outliers.
We demonstrate the effectiveness and the accuracy of our method through an integrated mobile system returning point clouds at metric scale.},
  url = {https://www.crs4.it/vic/data/papers/vmv2016-mobile_metric.pdf},
}

@InProceedings{Agus:2016:PPE,
  idxproject = {VIGEC,VICLAB},
  author = {Marco Agus and Alberto {Jaspe Villanueva} and Giovanni Pintore and Enrico Gobbetti},
  title = {{PEEP}: Perceptually Enhanced Exploration of Pictures},
  booktitle = {Proc. 21st International Workshop on Vision, Modeling and Visualization (VMV)},
  year = 2016,
  month = oct,
  pages = {93--100},
  doi = {10.2312/vmv.20161347},
  abstract = {We present a novel, simple technique for rapidly creating and presenting interactive immersive 3D exploration experiences of 2D pictures and images of natural and artificial landscapes. Various application domains, ranging from virtual exploration of works of art to street navigation systems, can benefit from the approach. The method, dubbed PEEP, is motivated by the perceptual characteristics of the human visual system in interpreting perspective cues and detecting relative angles between lines. It applies to common perspective images with zero or one vanishing point, and does not require the extraction of a precise geometric description of the scene. Taking as input a single image without other information, an automatic analysis technique fits a simple but perceptually consistent parametric 3D representation of the viewed space, which is used to drive an indirect constrained exploration method capable of providing the illusion of 3D exploration with realistic monocular (perspective and motion parallax) and binocular (stereo) depth cues. The effectiveness of the method is demonstrated on a variety of casual pictures and exploration configurations, including mobile devices.},
  url = {https://www.crs4.it/vic/data/papers/vmv2016-peep.pdf},
}

@InProceedings{Pintore:2016:MMV,
  idxproject = {VASCO,VIGEC,VICLAB},
  author = {Giovanni Pintore and Fabio Ganovelli and Enrico Gobbetti and Roberto Scopigno},
  title = {Mobile Mapping and Visualization of Indoor Structures to Simplify Scene Understanding and Location Awareness},
  booktitle = {Computer Vision -- ECCV 2016 Workshops: Amsterdam, The Netherlands, October 8-10 and 15-16, 2016, Proceedings, Part II},
  publisher = {Springer},
  year = 2016,
  month = oct,
  pages = {130--145},
  doi = {10.1007/978-3-319-48881-3_10},
  abstract = {We present a technology to capture, reconstruct, and explore multi-room indoor structures, starting from panorama images generated with the aid of commodity mobile devices. Our approach is motivated by the need for fast and effective systems to simplify indoor data acquisition, as required in many real-world cases where mapping the structure is more important than capturing 3D details, such as the design of smart houses or the security domain. We combine and extend state-of-the-art results to obtain indoor models scaled to their real-world metric dimensions, making them available for online exploration. Moreover, since our target is to assist end-users not necessarily skilled in virtual reality and 3D object interaction, we introduce a client-server image-based navigation system, exploiting this simplified indoor structure to support a low-degree-of-freedom user interface. We tested our approach in several indoor environments and carried out a preliminary user study to assess the usability of the system by people without a specific technical background.
},
  url = {https://www.crs4.it/vic/data/papers/acvr2016-indoor.pdf},
}

@InProceedings{Agus:2016:I3E,
  idxproject = {DMP,MONTEPRAMA4,VIGEC,VICLAB},
  author = {Marco Agus and Fabio Marton and Fabio Bettio and Enrico Gobbetti},
  title = {Interactive {3D} exploration of a virtual sculpture collection: an analysis of user behavior in museum setting},
  booktitle = {The 13th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2016,
  month = oct,
  pages = {},
  doi = {10.2312/gch.20161393},
  abstract = {We present a usage analysis of an interactive system for the exploration of highly detailed 3D models of a collection of protostoric Mediterranean sculptures. In this system, after selecting the object of interest inside the collection, its detailed 3D model and associated information are presented at high resolution on a large vertical display controlled by a touch-enabled horizontal surface placed at a suitable distance in front of it. The indirect user interface combines an object-aware interactive camera controller with an interactive point-of-interest selector, and is realized within a scalable implementation based on multiresolution structures shared between the rendering and user interaction subsystems. The system has been installed in several temporary and permanent exhibitions, and has been extensively used by tens of thousands of visitors. We provide here a data-driven analysis of the usage experience, based on logs gathered during a 24-month period in four exhibitions in archaeological museums, for a total of over 75K exploration sessions. The results highlight the main trends in visitor behavior during the interactive sessions, which can provide useful insights for the design of 3D exploration user interfaces in future digital installations.},
  url = {https://www.crs4.it/vic/data/papers/gch2016-monteprama.pdf},
  note = {Best paper award},
}

@InProceedings{Ciortan:2016:PRT,
  idxproject = {SCAN4RECO,VIGEC,VICLAB},
  author = {Irina Ciortan and Ruggero Pintus and Giacomo Marchioro and Claudia Daffara and Andrea Giachetti and Enrico Gobbetti},
  title = {A Practical Reflectance Transformation Imaging Pipeline for Surface Characterization in Cultural Heritage},
  booktitle = {The 13th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2016,
  month = oct,
  pages = {},
  doi = {10.2312/gch.20161396},
  abstract = {We present a practical acquisition and processing pipeline to characterize the surface structure of cultural heritage objects. Using a free-form Reflectance Transformation Imaging (RTI) approach, we acquire multiple digital photographs of the studied object shot from a stationary camera. In each photograph, a light is freely positioned around the object in order to cover a wide variety of illumination directions. Multiple reflective spheres and white Lambertian surfaces are added to the scene to automatically recover light positions and to compensate for non-uniform illumination. An estimation of geometry and reflectance parameters (e.g., albedo, normals, polynomial texture map coefficients) is then performed to locally characterize surface properties. The resulting object description is stable and representative enough of surface features to reliably provide a characterization of the measured surfaces. We validate our approach by comparing RTI-acquired data with data acquired with a high-resolution microprofilometer.},
  url = {https://www.crs4.it/vic/data/papers/gch2016-rti.pdf},
}
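% The free-form RTI entries above rely on glossy-sphere light calibration: for
% each photograph, a light direction can be recovered by reflecting the viewing
% direction about the sphere normal under the detected specular highlight. A
% minimal sketch of that geometry follows (our own illustration, not the
% authors' code); it assumes an orthographic camera looking down -z, and the
% function and parameter names are hypothetical.
%
%   import numpy as np
%
%   def light_from_highlight(cx, cy, r, hx, hy):
%       # (cx, cy), r: sphere center and radius in image space;
%       # (hx, hy): position of the detected specular highlight.
%       nx, ny = (hx - cx) / r, (hy - cy) / r
%       nz = np.sqrt(max(0.0, 1.0 - nx * nx - ny * ny))
%       n = np.array([nx, ny, nz])      # sphere normal at the highlight
%       v = np.array([0.0, 0.0, 1.0])   # orthographic view direction
%       l = 2.0 * np.dot(n, v) * n - v  # mirror reflection of v about n
%       return l / np.linalg.norm(l)    # unit light direction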
@Article{Balsa:2016:DMP,
  idxkey = {},
  idxproject = {DMP,DIVA,BIGDATA,HELIOS},
  author = {Marcos {Balsa Rodriguez} and Marco Agus and Fabio Bettio and Fabio Marton and Enrico Gobbetti},
  title = {{Digital Mont'e Prama}: Exploring large collections of detailed {3D} models of sculptures},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  year = 2016,
  volume = {9},
  number = {4},
  month = sep,
  pages = {18:1--18:23},
  doi = {10.1145/2915919},
  issn = {},
  publisher = {ACM},
  address = {New York, NY, USA},
  abstract = {We present and evaluate a scalable interactive system for the exploration of large collections of detailed 3D digital models of sculptures. The system has been applied to the valorization of the Mont'e Prama complex, an extraordinary collection of protostoric Mediterranean sculptures, which depict models of cone-shaped stone towers, as well as larger-than-life human figures. The software architecture is based on scalable components for the efficient distribution and adaptive rendering of extremely detailed surface meshes with overlaid information. The user interface, based on a simple and effective interactive camera controller tailored for touch interaction, has been designed to target both small screens and large display systems. The system components have been integrated in different interactive applications, ranging from large-screen museum setups to low-end mobile devices, both with very high visual quality. The large-scale system has been installed in a variety of temporary and permanent exhibitions, and has been extensively used by tens of thousands of visitors. In this paper, we provide an early analysis of the data gathered during a 20-month period in the National Archaeological Museum in Cagliari and a 6-month period in the Civic Museum in Cabras, for a total of over 67K exploration sessions.},
  url = {https://www.crs4.it/vic/data/papers/jocch2016-monteprama.pdf},
}

@InCollection{Mures:2016:PCM,
  idxproject = {DIVA},
  author = {Omar A. Mures and Alberto {Jaspe Villanueva} and Emilio J. Padr{\'o}n and Juan R. Rabu{\~n}al},
  title = {Point Cloud Manager: Applications of a Middleware for Managing Huge Point Clouds},
  srcurl = {https://www.igi-global.com/chapter/point-cloud-manager/157693},
  editor = {Manoj Kumar Singh and Dileep Kumar G.},
  booktitle = {Effective Big Data Management and Opportunities for Implementation},
  publisher = {IGI Global},
  month = jun,
  year = {2016},
  chapter = {13},
  isbn = {9781522501824},
  doi = {10.4018/978-1-5225-0182-4.ch013},
  abstract = {Recent advances in acquisition technologies, such as LIDAR and photogrammetry, have brought 3D point clouds back to popularity in many application fields of Computer Graphics: Civil Engineering, Architecture, Topography, etc. These acquisition systems are producing an unprecedented amount of geometric data with additional attached information, resulting in huge datasets whose processing and storage requirements exceed those of usual approaches, presenting new challenges that can be addressed from a Big Data perspective by applying High Performance Computing and Computer Graphics techniques. This chapter presents a series of applications built on top of Point Cloud Manager (PCM), a middleware that provides an abstraction for point clouds with arbitrary attached data and makes it easy to perform out-of-core operations on them on commodity CPUs and GPUs.
Hence, different kinds of real-world applications are tackled, showing both real-time and offline examples, covering render-oriented as well as computation-related operations.},
  url = {https://www.crs4.it/vic/data/papers/bigdatabook2016-pcm.pdf},
}

@proceedings{Gobbetti:2016:ESP,
  editor = {Enrico Gobbetti and Wes Bethel},
  title = {Eurographics Symposium on Parallel Graphics and Visualization, Groningen, The Netherlands, 2016. Proceedings},
  publisher = {Eurographics Association},
  year = {2016},
  isbn = {978-3-03868-006-2},
  thumbnail = {https://www.crs4.it/vic/data/papers/egpgv2016.jpg},
}

@InProceedings{Ahmad:2016:ASB,
  idxproject = {VASCO},
  author = {Alexandre Ahmad and Olivier Balet and Arjen Boin and Julien Castet and Maureen Donnelley and Fabio Ganovelli and George Kokkinis and Giovanni Pintore},
  title = {Assessing the Security of Buildings: A Virtual Studio Solution},
  booktitle = {13th International Conference for Crisis Response and Management (ISCRAM)},
  pages = {},
  address = {Conference held in Rio de Janeiro, Brazil},
  month = may,
  year = {2016},
  isbn = {},
  abstract = {This paper presents an innovative IT solution, a virtual studio, enabling security professionals to formulate, test, and adjust security measures to enhance the security of critical buildings. The concept is to virtualize the environment, enabling experts to examine, assess, and improve a building's security in a cost-effective and risk-free way. Our virtual studio solution makes use of the latest advances in computer graphics to reconstruct accurate blueprints as well as 3D representations of entire buildings in a very short timeframe. In addition, our solution enables the creation and simulation of multiple threat situations, allowing users to assess security procedures and various responses. Furthermore, we present a novel device, tailored to support collaborative security planning needs. Security experts from various disciplines evaluated our virtual studio solution, and their analysis is presented in this paper.},
  url = {https://www.crs4.it/vic/data/papers/iscram2016-assessing_building_security.pdf},
}

@InCollection{Mures:2016:VRP,
  idxproject = {DIVA},
  author = {Omar A. Mures and Alberto {Jaspe Villanueva} and Emilio J. Padr{\'o}n and Juan R. Rabu{\~n}al},
  title = {Virtual Reality and Point-based Rendering in Architecture and Heritage},
  editor = {Giuseppe Amoruso},
  booktitle = {Handbook of Research on Visual Computing and Emerging Geometrical Design Tools},
  publisher = {IGI Global},
  month = apr,
  year = {2016},
  chapter = {4},
  isbn = {9781522500292},
  doi = {10.4018/978-1-5225-0029-2},
  abstract = {Virtual Reality has been a hot research topic since the appearance of computer graphics, but lately there have been huge advances in the form of high-quality and affordable commodity hardware, for example with headsets such as the Oculus Rift. The Rift is an upcoming head-mounted virtual reality display, which will soon be available for the mainstream, along with other similar new VR hardware: Sulon Cortex, CastAR, Altergaze, etc. These new devices also offer new possibilities in the field of Augmented Reality, up to now limited to tablets and smartphones as far as commodity hardware is concerned. In fact, Virtual Reality and Augmented Reality technologies have now reached the point where they can be applied effectively, and their use in conjunction with the aforementioned workflows will yield great advantages for architects, engineers, and heritage professionals alike.
This article shows new possibilities for applying Virtual Reality and Augmented Reality with massive point clouds in real-world architectural and heritage workflows.},
  srcurl = {https://www.igi-global.com/book/handbook-research-visual-computing-emerging/141947},
  url = {https://www.crs4.it/vic/data/papers/vcegdt2016-pbr_architecture_heritage.pdf},
}

@InProceedings{Pintore:2016:OIC,
  idxproject = {VASCO},
  author = {Giovanni Pintore and Valeria Garro and Fabio Ganovelli and Enrico Gobbetti and Marco Agus},
  title = {Omnidirectional image capture on mobile devices for fast automatic generation of {2.5D} indoor maps},
  booktitle = {Proc. IEEE Winter Conference on Applications of Computer Vision (WACV)},
  year = 2016,
  month = feb,
  pages = {1--9},
  abstract = {We introduce a lightweight automatic method to quickly capture and recover 2.5D multi-room indoor environments scaled to real-world metric dimensions. To minimize the user effort required, we capture and analyze a single omnidirectional image per room using widely available mobile devices. Through a simple tracking of the user movements between rooms, we iterate the process to map and reconstruct entire floor plans. In order to infer 3D clues with minimal processing and without relying on the presence of texture or detail, we define a specialized spatial transform based on catadioptric theory to highlight the room's structure in a virtual projection. From this information, we define a parametric model of each room to formalize our problem as a global optimization solved by Levenberg-Marquardt iterations. The effectiveness of the method is demonstrated on several challenging real-world multi-room indoor scenes.},
  url = {https://www.crs4.it/vic/data/papers/wacv2016-panorama_indoor.pdf},
  doi = {10.1109/WACV.2016.7477631},
}

@InProceedings{Jaspe:2016:SSS,
  idxproject = {HELIOS,DIVA},
  author = {Alberto {Jaspe Villanueva} and Fabio Marton and Enrico Gobbetti},
  title = {{SSVDAGs}: Symmetry-aware {Sparse Voxel DAGs}},
  booktitle = {Proc. ACM i3D},
  year = 2016,
  month = feb,
  pages = {7--14},
  abstract = {Voxelized representations of complex 3D scenes are widely used nowadays to accelerate visibility queries in many GPU rendering techniques. Since GPU memory is limited, it is important that these data structures can be kept within a strict memory budget. Recently, directed acyclic graphs (DAGs) have been successfully introduced to compress sparse voxel octrees (SVOs), but they are limited to sharing identical regions of space. In this paper, we show that a more efficient lossless compression of geometry can be achieved, while keeping the same visibility-query performance, by merging subtrees that are identical up to a similarity transform, and by exploiting the skewed distribution of references to shared nodes to store child pointers using a variable bit-rate encoding. We also describe how, by selecting plane reflections along the main grid directions as symmetry transforms, we can construct highly compressed GPU-friendly structures using a fully out-of-core method. Our results demonstrate that state-of-the-art compression and real-time tracing performance can be achieved on high-resolution voxelized representations of real-world scenes of very different characteristics, including large CAD models, 3D scans, and typical gaming models, leading, for instance, to real-time GPU in-core visualization with shading and shadows of the full Boeing 777 at sub-millimetric precision.},
  url = {https://www.crs4.it/vic/data/papers/i3d2016-symmetry-dags.pdf},
}
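% The merging step described in the SSVDAG entry above can be pictured as a
% bottom-up hashing pass in which each 2x2x2 octree node is replaced by a
% canonical representative chosen among its eight axis-aligned reflections.
% The sketch below is our own simplified illustration (not the paper's
% algorithm): it shows the canonicalization only, whereas a full
% implementation would also propagate the chosen reflection to the child
% subtrees via tags on the child references.
%
%   from itertools import product
%
%   def reflect(children, rx, ry, rz):
%       # children: tuple of 8 child keys indexed by octant bits x | y<<1 | z<<2.
%       out = [None] * 8
%       for i, c in enumerate(children):
%           x, y, z = i & 1, (i >> 1) & 1, (i >> 2) & 1
%           out[(x ^ rx) | ((y ^ ry) << 1) | ((z ^ rz) << 2)] = c
%       return tuple(out)
%
%   def canonical(children):
%       # Keep the lexicographically smallest variant over the 8 reflections;
%       # the winning (rx, ry, rz) flags are stored in the parent's pointer.
%       return min((reflect(children, *r), r) for r in product((0, 1), repeat=3))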
@Article{Pintus:2016:SGA,
  idxkey = {},
  idxproject = {},
  author = {Ruggero Pintus and Kazim Pal and Ying Yang and Tim Weyrich and Enrico Gobbetti and Holly Rushmeier},
  title = {A Survey of Geometric Analysis in Cultural Heritage},
  journal = j-CG-FORUM,
  year = 2016,
  volume = {35},
  number = {1},
  pages = {4--31},
  doi = {10.1111/cgf.12668},
  abstract = {We present a review of recent techniques for performing geometric analysis in cultural heritage applications. The survey is aimed at researchers in the areas of computer graphics, computer vision, and cultural heritage computing, as well as at scholars and practitioners in the cultural heritage field. The problems considered include shape perception enhancement, restoration and preservation support, monitoring over time, object interpretation, and collection analysis. All of these problems typically rely on an understanding of the structure of the shapes in question at both a local and a global level. In this survey, we discuss the different problem forms and review the main solution methods, aided by classification criteria based on the geometric scale at which the analysis is performed and the cardinality of the relationships among object parts exploited during the analysis. We conclude the report by discussing open problems and future perspectives.},
  url = {https://www.crs4.it/vic/data/papers/cgf2015-survey_geometric_analysis_ch.pdf},
}

%################################
%### 2015
%################################

@TechReport{Gobbetti:2016:OSR,
  idxproject = {DIVA},
  author = {Enrico Gobbetti and Marcos {Balsa Rodriguez} and Jose {Diaz Iriberri} and Alberto {Jaspe Villanueva}},
  title = {Output-sensitive Rendering on Light Field Displays},
  type = {Deliverable},
  number = {D3.1},
  institution = {EU Project DIVA (FP7 290227)},
  year = 2015,
  abstract = {This deliverable reports on the research results achieved in the field of Output Sensitive Rendering Techniques, covered in the project's Work Package 3. We list the contributions of the main partners involved, summarize the project publications, classifying them in terms of the main kind of data handled, list the awards received, and summarize the events that have used the developed technology.},
  url = {https://www.crs4.it/vic/data/papers/DIVA-D3_1-Output_sensitive_rendering_on_3d_displays-CRS4.pdf},
}

@InProceedings{Gobbetti:2015:CVC,
  idxproject = {DMP},
  author = {Enrico Gobbetti},
  title = {CRS4 Visual Computing},
  booktitle = {STAG 2015 Lab Presentations},
  year = 2015,
  month = oct,
  pages = {},
  abstract = {Established in 1996, the Visual Computing program of the CRS4 research center primarily focuses on the study, development, and application of technology for the acquisition, storage, processing, distribution, and interactive exploration of complex objects and environments. Research is widely published in major journals and conferences, and many of the developed technologies have been used in real-world applications as diverse as internet geoviewing, scientific data analysis, surgical training, and cultural heritage study and valorization.
},
  url = {https://www.crs4.it/vic/data/papers/stag2015-crs4viclab.pdf},
}

@InProceedings{Yang:2015:ACC,
  idxkey = {},
  idxproject = {BIGDATA},
  author = {Ying Yang and Ruggero Pintus and Holly Rushmeier and Enrico Gobbetti},
  title = {Automated Color Clustering for Medieval Manuscript Analysis},
  abstract = {Given a color image of a medieval manuscript page, we propose a simple, yet efficient algorithm for automatically estimating the number of its color-based pixel groups, $K$. We formulate this estimation as a minimization problem, where the objective function assesses the quality of a candidate clustering. Rather than using all the features of the given image, we carefully select a subset of features to perform clustering. The proposed algorithm was extensively evaluated on a dataset of 2198 images (1099 original images and their 1099 variants produced by modifying both the spatial and spectral resolutions of the originals) from Yale's Institute for the Preservation of Cultural Heritage (IPCH). The experimental results show that it is able to yield satisfactory estimates of $K$ for these test images.},
  booktitle = {Proc. Digital Heritage},
  pages = {101--104},
  month = sep,
  year = 2015,
  organization = {},
  publisher = {},
  isbn = {978-1-5090-0254-2},
  url = {https://www.crs4.it/vic/data/papers/dh2015-color_clustering.pdf},
}

@InProceedings{Pintus:2015:AWF,
  idxkey = {},
  idxproject = {BIGDATA},
  author = {Ruggero Pintus and Ying Yang and Holly Rushmeier and Enrico Gobbetti},
  title = {An Automatic Word-spotting Framework for Medieval Manuscripts},
  abstract = {We present a completely automatic and scalable framework to perform query-by-example word-spotting on medieval manuscripts. Our system does not require any human intervention to produce a large amount of annotated training data, and it provides Computer Vision researchers and Cultural Heritage practitioners with a compact and efficient system for document analysis. We have executed the pipeline both in a single-manuscript and in a cross-manuscript setup, and we have tested it on a heterogeneous set of medieval manuscripts, which includes a variety of writing styles, languages, image resolutions, levels of conservation, noise, and amounts of illumination and ornamentation. We also present a precision/recall-based analysis to quantitatively assess the quality of the proposed algorithm.},
  booktitle = {Proc. Digital Heritage},
  pages = {5--12},
  month = sep,
  year = 2015,
  organization = {},
  publisher = {},
  isbn = {978-1-5090-0254-2},
  url = {https://www.crs4.it/vic/data/papers/dh2015-word_spotting.pdf},
}

@InProceedings{Balsa:2015:DMP,
  idxkey = {},
  idxproject = {DMP,DIVA,HELIOS,BIGDATA},
  author = {Marcos {Balsa Rodriguez} and Marco Agus and Fabio Bettio and Fabio Marton and Enrico Gobbetti},
  title = {{Digital Mont'e Prama}: {3D} cultural heritage presentations in museums and anywhere},
  abstract = {We present an interactive visualization system developed for the valorization of an extraordinary collection of protostoric Mediterranean sculptures, which depict models of buildings (cone-shaped stone towers), as well as larger-than-life human figures. The architecture is based on scalable components for the efficient distribution and adaptive rendering of extremely detailed surface meshes, as well as a simple and effective interactive camera controller tailored for touch interaction.
The user interface has been designed to target both small screens and large display systems, and in such a way that casual users can easily and naturally explore the models, with fast learning curves. Furthermore, a thumbnail-based point-of-interest selector enables users to explore 3D views with information presented as 2D overlays decorating the 3D scene. The system components have been integrated in different interactive applications, ranging from large-screen museum setups to low-end mobile devices, both with very high visual quality. The capabilities of the museum systems have been demonstrated in a variety of temporary and permanent exhibitions, where they have been extensively used by tens of thousands of visitors.},
  booktitle = {Proc. Digital Heritage},
  pages = {545--552},
  month = sep,
  year = 2015,
  organization = {},
  publisher = {},
  isbn = {978-1-5090-0254-2},
  url = {https://www.crs4.it/vic/data/papers/dh2015-monteprama.pdf},
  note = {Best paper award},
}

@InProceedings{Giachetti:2015:LCQ,
  idxkey = {},
  idxproject = {},
  author = {Andrea Giachetti and Claudia Daffara and Carlo Reghelin and Enrico Gobbetti and Ruggero Pintus},
  title = {Light calibration and quality assessment methods for Reflectance Transformation Imaging applied to artworks' analysis},
  booktitle = {Proc. SPIE},
  volume = {9527},
  number = {},
  pages = {95270B:95270B-10},
  abstract = {In this paper we analyze some problems related to the acquisition of multiple illumination images for Polynomial Texture Maps (PTM) or generic Reflectance Transformation Imaging (RTI). We show that intensity and directionality non-uniformity can be a relevant issue when acquiring manual sets of images with the standard highlight-based setup, both when using a flash lamp and when using an LED light. To maintain a cheap and flexible acquisition setup that can be used in the field and by non-experienced users, we propose a dynamic calibration and correction of the lights, based on multiple intensity and direction estimates around the imaged object during the acquisition. Preliminary tests have been performed by acquiring a specifically designed 3D-printed pattern, in order to assess the accuracy obtained both in the spatial discrimination of small structures and in normal estimation, and samples of different types of paper, in order to evaluate material discrimination. Building on this analysis and on the tools developed and under development, we plan to design a set of novel procedures and guidelines that can turn the cheap and common RTI acquisition setup from a simple way to enrich object visualization into a powerful method for extracting a quantitative characterization of both the surface geometry and the reflective properties of different materials. These results could have relevant applications in the Cultural Heritage domain, e.g., to recognize different materials used in paintings or to investigate the ageing status of artifacts' surfaces.},
  year = {2015},
  doi = {10.1117/12.2184761},
  url = {https://www.crs4.it/vic/data/papers/spie2015-rti.pdf},
}
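% The RTI entries above ultimately feed calibrated per-image light directions
% into a per-pixel relightable model. As a concrete reminder of the standard
% PTM case, the sketch below (our own illustration, not the papers' code) fits
% the six biquadratic PTM coefficients per pixel by least squares from N
% images with known projected light directions (lu, lv); names are
% hypothetical.
%
%   import numpy as np
%
%   def fit_ptm(intensities, light_dirs):
%       # intensities: (N, H, W) image stack; light_dirs: (N, 2) of (lu, lv).
%       # Model: I = a0*lu^2 + a1*lv^2 + a2*lu*lv + a3*lu + a4*lv + a5.
%       lu, lv = light_dirs[:, 0], light_dirs[:, 1]
%       A = np.stack([lu * lu, lv * lv, lu * lv, lu, lv, np.ones_like(lu)], axis=1)
%       N, H, W = intensities.shape
%       b = intensities.reshape(N, -1).astype(np.float64)
%       coeffs, *_ = np.linalg.lstsq(A, b, rcond=None)
%       return coeffs.reshape(6, H, W)  # per-pixel PTM coefficients a0..a5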
@Article{Kiran:2015:RAC,
  idxkey = {TOP-THEME-LIGHT-FIELD},
  idxproject = {DIVA},
  author = {Vamsi {Kiran Adhikarla} and Fabio Marton and Tibor Balogh and Enrico Gobbetti},
  title = {Real-time adaptive content retargeting for live multi-view capture and light field display},
  journal = {The Visual Computer},
  volume = {31},
  number = {6--8},
  pages = {1023--1032},
  doi = {10.1007/s00371-015-1127-6},
  year = 2015,
  abstract = {The discrete nature of multiprojector light field displays results in aliasing when rendering scene points at depths outside the supported depth of field, causing visual discomfort. We propose an efficient on-the-fly content-aware real-time depth retargeting algorithm for live 3D light field video, to increase the quality of visual perception on a cluster-driven multiprojector light field display. The proposed algorithm is embedded in an end-to-end real-time system capable of capturing and reconstructing a light field from multiple calibrated cameras on a full horizontal parallax light field display. By automatically detecting salient regions of a scene, we solve an optimization to derive a non-linear operator that fits the whole scene within the comfortable viewing range of the light field display. We evaluate the effectiveness of our approach on synthetic and real-world scenes.},
  keywords = {},
  url = {https://www.crs4.it/vic/data/papers/tvc2015-holo-retargeting.pdf},
}

@InProceedings{Diaz:2015:PEV,
  idxproject = {DIVA},
  author = {Jose D{\'i}az and Timo Ropinski and Isabel Navazo and Enrico Gobbetti and Pere-Pau V{\'a}zquez},
  title = {Perceptual effects of volumetric shading models in stereoscopic desktop-based environments},
  booktitle = {Proc. Computer Graphics International},
  year = 2015,
  pages = {1--10},
  abstract = {Throughout the years, many shading techniques have been developed to improve the conveying of information in Volume Visualization. Some of these methods, usually referred to as realistic, are supposed to provide better cues for the understanding of volume data sets. While shading approaches are heavily exploited in traditional monoscopic setups, no previous study has analyzed the effect of these techniques in Virtual Reality. To further explore the influence of shading on the understanding of volume data in such environments, we carried out a user study in a desktop-based stereoscopic setup. The goals of the study were to investigate the impact of well-known shading approaches and the influence of real illumination on depth perception. Participants had to perform three different perceptual tasks when exposed to static visual stimuli. Forty-five participants took part in the study, giving us 1152 trials for each task. Results show that advanced shading techniques improve depth perception in stereoscopic volume visualization. Moreover, external lighting does not affect depth perception when these shading methods are applied.
As a result, we derive some guidelines that may help researchers when selecting illumination models for stereoscopic rendering.},
  url = {https://www.crs4.it/vic/data/papers/cgi2015-perceptual.pdf},
}

@Article{Balsa:2015:ARE,
  idxkey = {},
  idxproject = {DMP,DIVA,HELIOS},
  author = {Marcos {Balsa Rodriguez} and Marco Agus and Fabio Marton and Enrico Gobbetti},
  title = {Adaptive Recommendations for Enhanced Non-linear Exploration of Annotated {3D} Objects},
  journal = j-CG-FORUM,
  year = 2015,
  volume = 34,
  number = 3,
  pages = {41--50},
  doi = {10.1111/cgf.12616},
  abstract = {We introduce a novel approach for letting casual viewers explore detailed 3D models integrated with structured, spatially associated descriptive information organized in a graph. Each node associates a subset of the 3D surface seen from a particular viewpoint to the related descriptive annotation, together with its author-defined importance. Graph edges describe, instead, the strength of the dependency relation between information nodes, allowing content authors to describe the preferred order of presentation of information. At run-time, users navigate inside the 3D scene using a camera controller, while adaptively receiving unobtrusive guidance towards interesting viewpoints and history- and location-dependent suggestions on important information, which is adaptively presented using 2D overlays displayed over the 3D scene. The capabilities of our approach are demonstrated in a real-world cultural heritage application involving the public presentation of a sculptural complex on a large projection-based display. A user study has been performed in order to validate our approach.},
  url = {https://www.crs4.it/vic/data/papers/ev2015-recommendations.pdf},
  note = {Proc. EuroVis 2015},
}

@inproceedings{Rushmeier:2015:ECO,
  idxproject = {YDC2-1},
  title = {Examples of challenges and opportunities in visual analysis in the digital humanities},
  author = {Holly Rushmeier and Ruggero Pintus and Ying Yang and Christiana Wong and David Li},
  booktitle = {Human Vision and Electronic Imaging XX},
  year = {2015},
  organization = {SPIE},
  abstract = {The massive digitization of books and manuscripts has converted millions of works that were once only physical into electronic documents. This conversion has made it possible for scholars to study large bodies of work, rather than just individual texts, offering new opportunities for scholarship in the humanities. Much previous work on digital collections has relied on optical character recognition and focused on the textual content of books. New work is emerging that analyzes the visual layout and content of books and manuscripts. We present two different digital humanities projects in progress that offer new opportunities for extracting data about the past, with new challenges for designing systems that let scholars interact with this data. The first project concerns the layout and spectral content of thousands of pages from medieval manuscripts. We present the techniques used to study content variations in sets of similar manuscripts, and to study material variations that may indicate the location of manuscript production. The second project is the analysis of representations in the complete archive of Vogue magazine over 120 years. We present examples of applying computer vision techniques to understand the changes in the representation of women over time.
},
  url = {https://www.crs4.it/vic/data/papers/hvei2015-examples.pdf},
}

@inproceedings{Adikharla:2015:RCA,
  idxproject = {DIVA},
  title = {Real-time Content Adaptive Depth Retargeting for Light Field Displays},
  author = {{Vamsi Kiran} Adhikarla and Fabio Marton and Attila Barsi and {Peter Tamas} Kovacs and Tibor Balogh and Enrico Gobbetti},
  booktitle = {Eurographics Posters},
  year = {2015},
  doi = {10.2312/egp.20151035},
  abstract = {Light field display systems present visual scenes using a set of directional light beams emitted from multiple light sources, as if they were emitted from points in a physical scene. These displays offer better angular resolution and therefore provide more depth of field than other automultiscopic displays. However, in some cases the size of a scene may still exceed the available depth range of a light field display. Thus, rendering on these displays requires a suitable adaptation of 3D content to provide a comfortable viewing experience. We propose a content-adaptive depth retargeting method to automatically modify the scene depth to suit the needs of a light field display. By analyzing the scene and using display-specific parameters, we formulate and solve an optimization problem to non-linearly adapt the scene depth to the display depth. Our method synthesizes the depth-retargeted light field content in real time, supporting interactive visualization, and also preserves the 3D appearance of the displayed objects as much as possible.},
  url = {https://www.crs4.it/vic/data/papers/eg2015p-retargeting.pdf},
}

@Article{Mattausch:2015:CCH,
  idxkey = {},
  idxproject = {DIVA},
  author = {Oliver Mattausch and Jiri Bittner and Alberto {Jaspe Villanueva} and Enrico Gobbetti and Michael Wimmer and Renato Pajarola},
  title = {{CHC+RT}: Coherent Hierarchical Culling for Ray Tracing},
  journal = j-CG-FORUM,
  year = 2015,
  volume = 34,
  number = 2,
  pages = {537--548},
  doi = {10.1111/cgf.12582},
  abstract = {We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray-tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 times better than pure shader-based ray tracing.},
  url = {https://www.crs4.it/vic/data/papers/eg2015-chc+rt.pdf},
  note = {Proc. Eurographics 2015},
}

@Article{Gobbetti:2015:DMP,
  idxkey = {},
  idxproject = {DMP,DIVA,MONTEPRAMA1,MONTEPRAMA2,HELIOS},
  author = {Enrico Gobbetti and Ruggero Pintus and Fabio Bettio and Fabio Marton and Marco Agus and Marcos {Balsa Rodriguez}},
  title = {{Digital Mont'e Prama}: dalla digitalizzazione accurata alla valorizzazione di uno straordinario complesso statuario},
  journal = {Archeomatica},
  year = 2015,
  volume = 6,
  number = 1,
  pages = {10--14},
  abstract = {The paper outlines the main outcomes of the Digital Mont'e Prama project, which started from a large-scale acquisition campaign of the Mont'e Prama complex, an extraordinary collection of stone fragments from the Nuragic era, depicting larger-than-life archers, warriors, and boxers, as well as small models of prehistoric nuraghe (cone-shaped stone towers). The acquisition campaign has covered 36 statues mounted on metallic supports, acquired at 0.25mm resolution, resulting in over 6200 range scans (over 1.3G valid samples) and over 3400 10Mpixel photographs. Innovative technologies were studied and developed in order to acquire, process, and reconstruct highly detailed 3D representations of the statues. These digital surrogates, in addition to documenting the conservation status of the objects, are exploited for a variety of valorization applications, ranging from physical replicas for tactile museums, to network-based frameworks for interactive exploration on mobile devices, to high-end projection-based interactive installations in museum settings.},
  url = {https://www.crs4.it/vic/data/papers/archeomatica2015-digital_monteprama.pdf},
}

@Article{Pintus:2015:FRF,
  idxkey = {TOP-THEME-ACQUISITION},
  idxproject = {DMP,DIVA,MONTEPRAMA1},
  author = {Ruggero Pintus and Enrico Gobbetti},
  title = {A Fast and Robust Framework for Semi-Automatic and Automatic Registration of Photographs to {3D} Geometry},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  year = 2015,
  volume = 7,
  number = 4,
  month = feb,
  pages = {23:1--23:23},
  doi = {10.1145/2629514},
  issn = {1556-4673},
  publisher = {ACM},
  address = {New York, NY, USA},
  abstract = {We present a simple, fast, and robust complete framework for 2D/3D registration, capable of aligning, in a semi-automatic or completely automatic manner, a large set of unordered images to a massive point cloud. Our method converts the hard-to-solve image-to-geometry registration task into a Structure-from-Motion (SfM) plus 3D/3D alignment problem. We exploit an SfM framework that, starting just from an unordered image collection, computes an estimate of the camera parameters and a sparse 3D geometry derived from matched image features. We then coarsely register this model to the given 3D geometry by estimating a global scale and absolute orientation, using one of two solutions: minimal user intervention or a stochastic global point set registration approach. A specialized sparse bundle adjustment (SBA) step, which exploits the correspondence between the sparse geometry and the fine input 3D model, is then used to refine the intrinsic and extrinsic parameters of each camera. The output data is suitable for photo blending frameworks to produce seamless colored models. The effectiveness of the method is demonstrated on a series of synthetic and real-world 2D/3D Cultural Heritage datasets.},
  url = {https://www.crs4.it/vic/data/papers/jocch2015-3d2d_registration.pdf},
}
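% The coarse SfM-to-geometry alignment stage described in the entry above
% amounts to estimating a similarity transform (global scale, rotation, and
% translation) between corresponding 3D point sets. One standard closed-form
% solution for that subproblem is the Umeyama/Horn absolute-orientation
% method, sketched below as our own illustration (not the paper's code;
% function and variable names are hypothetical).
%
%   import numpy as np
%
%   def similarity_align(P, Q):
%       # Find s, R, t minimizing sum ||s * R @ p + t - q||^2 for (N, 3) sets.
%       mp, mq = P.mean(axis=0), Q.mean(axis=0)
%       Pc, Qc = P - mp, Q - mq
%       U, S, Vt = np.linalg.svd(Qc.T @ Pc / len(P))   # cross-covariance SVD
%       D = np.eye(3)
%       if np.linalg.det(U @ Vt) < 0:                  # avoid reflections
%           D[2, 2] = -1.0
%       R = U @ D @ Vt
%       s = np.trace(np.diag(S) @ D) / Pc.var(axis=0).sum()
%       t = mq - s * (R @ mp)
%       return s, R, t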
@PhdThesis{Balsa:2015:SEH,
  idxproject = {DMP,DIVA},
  author = {Marcos {Balsa Rodriguez}},
  title = {Scalable Exploration of Highly Detailed and Annotated {3D} Models},
  school = {PhD School of Mathematics and Computer Science, University of Cagliari, Italy},
  year = 2015,
  abstract = {With the widespread availability of mobile graphics terminals and WebGL-enabled browsers, 3D graphics over the Internet is thriving. Thanks to recent advances in 3D acquisition and modeling systems, high-quality 3D models are becoming increasingly common, and are now potentially available for ubiquitous exploration. In current 3D repositories, such as Blend Swap, 3D Café or Archive3D, 3D models available for download are mostly presented through a few user-selected static images. Online exploration is limited to simple orbiting and/or low-fidelity explorations of simplified models, since photorealistic rendering quality of complex synthetic environments is still hardly achievable within the real-time constraints of interactive applications, especially on low-powered mobile devices or script-based Internet browsers. Moreover, navigating inside 3D environments, especially on the now pervasive touch devices, is a non-trivial task, and usability is consistently improved by employing assisted navigation controls. In addition, 3D annotations are often used in order to integrate and enhance the visual information by providing spatially coherent contextual information, typically at the expense of introducing visual clutter. In this thesis, we focus on efficient representations for the interactive exploration and understanding of highly detailed 3D meshes on common 3D platforms. For this purpose, we present several approaches exploiting constraints on the data representation for improving the streaming and rendering performance, and camera movement constraints in order to provide scalable navigation methods for the interactive exploration of complex 3D environments. Furthermore, we study visualization and interaction techniques to improve the exploration and understanding of complex 3D models by exploiting guided motion control techniques to aid the user in discovering contextual information while avoiding cluttering the visualization. We demonstrate the effectiveness and scalability of our approaches both in large-screen museum installations and on mobile devices, by performing interactive exploration of models ranging from 9M triangles to 940M triangles.},
  url = {https://www.crs4.it/vic/data/papers/2015-phd-balsa-scalable_3d_exploration.pdf},
}

%################################
%### 2014
%################################

@InProceedings{Pintore:2014:IMI,
  idxproject = {VASCO},
  author = {Giovanni Pintore and Marco Agus and Enrico Gobbetti},
  title = {Interactive mapping of indoor building structures through mobile devices},
  booktitle = {Proc. 2nd International Conference on 3D Vision},
  year = 2014,
  month = dec,
  volume = {2},
  pages = {103--110},
  doi = {10.1109/3DV.2014.40},
  issn = {1550-6185},
  abstract = {We present a practical system to map and reconstruct multi-room indoor structures using the sensors commonly available in commodity smartphones. Our approach combines and extends state-of-the-art results to automatically generate floor plans scaled to real-world metric dimensions and to reconstruct scenes not necessarily limited to the Manhattan World assumption.
In contrast to previous works, our approach introduces an interactive procedure, based on statistical indicators, for refining wall orientations, and a specialized merging algorithm for building the final room shapes. The low CPU cost of the method makes it possible to support full execution on commodity smartphones, without the need to connect them to a compute server. We demonstrate the effectiveness of our technique on a variety of multi-room indoor scenes, achieving remarkably better results than previous approaches.},
  keywords = {},
  url = {https://www.crs4.it/vic/data/papers/3dvbe2014-interactive_mapping.pdf},
}

@InProceedings{Pintus:2014:ATA,
  idxproject = {YDC2-1},
  author = {Ruggero Pintus and Ying Yang and Enrico Gobbetti and Holly Rushmeier},
  title = {{A TaLISMAN}: Automatic Text and LIne Segmentation of historical MANuscripts},
  booktitle = {The 12th Eurographics Workshop on Graphics and Cultural Heritage},
  year = 2014,
  month = oct,
  pages = {35--44},
  doi = {10.2312/gch.20141302},
  abstract = {Historical and artistic handwritten books are valuable cultural heritage (CH) items, as they provide information about tangible and intangible cultural aspects from the past. Massive digitization projects have made these kinds of data available to a world-wide population, and pose real challenges for automatic processing. In this scenario, document layout analysis plays a significant role, being a fundamental step of any document image understanding system. In this paper, we present a completely automatic algorithm to perform a robust text segmentation of old handwritten manuscripts on a per-book basis, and we show how to exploit this outcome to find two layout elements, i.e., text blocks and text lines. Our proposed technique has been evaluated on a large and heterogeneous corpus, and our experimental results demonstrate that this approach is efficient and reliable, even when applied to very noisy and damaged books.},
  url = {https://www.crs4.it/vic/data/papers/gch2014-talisman.pdf},
}

@InProceedings{Pintus:2014:GAC,
  idxproject = {YDC2-1},
  author = {Ruggero Pintus and Kazim Pal and Ying Yang and Tim Weyrich and Enrico Gobbetti and Holly Rushmeier},
  title = {Geometric Analysis in Cultural Heritage},
  booktitle = {The 12th Eurographics Workshop on Graphics and Cultural Heritage - STARS Proceedings},
  year = 2014,
  month = oct,
  pages = {117--133},
  doi = {10.2312/gch.20141310},
  abstract = {We present a review of recent techniques for performing geometric analysis in cultural heritage applications, targeting the broad community of researchers and practitioners in cultural heritage computing. The problems considered include shape perception enhancement, restoration and preservation support, monitoring over time, object interpretation, and collection analysis. All of these problems typically rely on an understanding of the structure of the shapes in question at both a local and a global level. In this survey, we discuss the different problem forms and review the main solution methods, aided by classification criteria based on the geometric scale at which the analysis is performed and the cardinality of the relationships among object parts exploited during the analysis. We conclude the report by discussing open problems and future perspectives.
},
  url = {https://www.crs4.it/vic/data/papers/gch2014-star-geometry-analysis.pdf},
}

@InProceedings{Agus:2014:SSO,
  idxproject = {DMP,DIVA},
  author = {Marco Agus and Enrico Gobbetti and Alberto {Jaspe Villanueva} and Claudio Mura and Renato Pajarola},
  title = {SOAR: Stochastic Optimization for Affine global point set Registration},
  booktitle = {Proc. 19th International Workshop on Vision, Modeling and Visualization (VMV)},
  year = 2014,
  month = oct,
  pages = {103--110},
  doi = {10.2312/vmv.20141282},
  abstract = {We introduce a stochastic algorithm for the pairwise affine registration of partially overlapping 3D point clouds with unknown point correspondences. The algorithm recovers the globally optimal scale, rotation, and translation alignment parameters and is applicable in a variety of difficult settings, including very sparse, noisy, and outlier-ridden datasets that do not permit the computation of local descriptors. The technique is based on a stochastic approach for the global optimization of an alignment error function that is robust to noise and resistant to outliers. At each optimization step, it alternates between stochastically visiting a generalized BSP-tree representation of the current solution landscape to select a promising transformation, finding point-to-point correspondences using a GPU-accelerated technique, and incorporating new error values in the BSP tree. In contrast to previous work, instead of simply constructing the tree by guided random sampling, we exploit the problem structure through a low-cost local minimization process based on analytically solving absolute orientation problems using the current correspondences. We demonstrate the quality and performance of our method on a variety of large point sets with different scales, resolutions, and noise characteristics.},
  url = {https://www.crs4.it/vic/data/papers/vmv2014-soar.pdf},
}

@InProceedings{Marton:2014:RDG,
  idxproject = {DIVA},
  author = {Fabio Marton and {Jos\'e Antonio} {Iglesias Guiti\'an} and Jose Diaz and Enrico Gobbetti},
  title = {Real-time deblocked GPU rendering of compressed volumes},
  booktitle = {Proc. 19th International Workshop on Vision, Modeling and Visualization (VMV)},
  year = 2014,
  month = oct,
  pages = {167--174},
  doi = {10.2312/vmv.20141290},
  abstract = {The vast majority of current state-of-the-art compressed GPU volume renderers are based on block-transform coding, which is susceptible to blocking artifacts, particularly at low bit rates. In this paper we address the problem for the first time, by introducing a specialized deferred filtering architecture working on block-compressed data and including a novel deblocking algorithm. The architecture efficiently performs high-quality shading of massive datasets by closely coordinating visibility- and resolution-aware adaptive data loading with GPU-accelerated per-frame data decompression, deblocking, and rendering. A thorough evaluation including quantitative and qualitative measures demonstrates the performance of our approach on large static and dynamic datasets, including a massive $512^4$ turbulence simulation (256GB), which is aggressively compressed to less than $2$ GB, so as to fully upload it onto the graphics board and explore it in real time during animation.},
  url = {https://www.crs4.it/vic/data/papers/vmv2014-volume-deblocking.pdf},
}
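% As a greatly simplified CPU analogue of the deblocking idea in the entry
% above (the paper's GPU filter is considerably more elaborate), the sketch
% below softens discontinuities across the boundaries of fixed-size coding
% blocks along one axis of a volume; the block size and blend weight are
% hypothetical parameters of our own choosing.
%
%   import numpy as np
%
%   def deblock_1d(vol, axis, block=16, w=0.25):
%       # Blend the two voxel layers adjacent to each block boundary.
%       out = vol.astype(np.float32)
%       for b in range(block, vol.shape[axis], block):
%           lo = np.take(vol, b - 1, axis=axis).astype(np.float32)
%           hi = np.take(vol, b, axis=axis).astype(np.float32)
%           avg = 0.5 * (lo + hi)
%           idx_lo = [slice(None)] * vol.ndim; idx_lo[axis] = b - 1
%           idx_hi = [slice(None)] * vol.ndim; idx_hi[axis] = b
%           out[tuple(idx_lo)] = (1 - w) * lo + w * avg  # pull boundary
%           out[tuple(idx_hi)] = (1 - w) * hi + w * avg  # samples together
%       return out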
@inproceedings{Ahmad:2014:BIT,
  idxproject = {VASCO},
  author = {Arjen Boin and Frederik Bynander and Giovanni Pintore and Fabio Ganovelli and George Leventakis and Alexandre Ahmad and Olivier Balet},
  title = {Building an {IT} Platform for Strategic Crisis Management Preparation},
  booktitle = {10th International Conference on Wireless and Mobile Computing, Networking and Communications (WiMob)},
  address = {Larnaca, Cyprus},
  month = oct,
  year = 2014,
  pages = {20--27},
  keywords = {Crisis Management; Common Operational Picture; Indoor reconstruction; Large scale exercises},
  abstract = {This paper presents the result of the work achieved by a European consortium whose goal is to build an innovative system to assist security managers in the crisis preparation, training, and management phases. The iterative approach of the consortium is presented, as well as the results. A novel interactive and shared Common Operational Picture is proposed, which has been validated by three large-scale demonstrations. Ongoing and future work focusing on the security of building interiors is also presented.},
  url = {https://www.crs4.it/vic/data/papers/en4ppdr2014-vasco.pdf},
  doi = {10.1109/WiMOB.2014.6962144},
}

@inproceedings{Taibo:2014:PLR,
  idxproject = {DIVA},
  author = {Javier Taibo and Alberto {Jaspe Villanueva} and Antonio Seoane and Marco Agus and Luis Hernandez},
  title = {Practical line rasterization for multi-resolution textures},
  booktitle = {Proc. Smart Tools and Apps for Graphics},
  month = sep,
  year = 2014,
  doi = {10.2312/stag.20141234},
  pages = {9--18},
  abstract = {Draping 2D vectorial information over a 3D terrain elevation model is usually performed by real-time rendering to texture. In the case of linear feature representation, there are several specific problems with the texturing approach, especially when using multi-resolution textures. These problems are related to visual quality, aliasing artifacts, and rendering performance. In this paper, we address the problems of 2D line rasterization on a multi-resolution texturing engine from a pragmatic point of view; several alternative solutions are presented, compared, and evaluated. For each solution we have analyzed the visual quality, the impact on rendering performance, and the memory consumption. The study performed in this work is based on an OpenGL implementation of a clipmap-based multi-resolution texturing system, and is oriented towards the use of inexpensive consumer graphics hardware.},
  url = {https://www.crs4.it/vic/data/papers/stag2014-line_rasterization.pdf},
}

@proceedings{Polys:2014:WPN,
  editor = {Nicholas F. Polys and Alain Chesnais and Enrico Gobbetti and Juergen Doellner},
  title = {Web3D '14: Proceedings of the Nineteenth International ACM Conference on 3D Web Technologies},
  year = {2014},
  isbn = {978-1-4503-3015-2},
  location = {Vancouver, British Columbia, Canada},
  publisher = {ACM},
  address = {New York, NY, USA},
  url = {https://www.crs4.it/vic/data/papers/web3d2014-frontmatter.pdf},
}

@Article{Mura:2014:ARD,
  idxkey = {TOP-THEME-ACQUISITION},
  idxproject = {DIVA},
  author = {Claudio Mura and Oliver Mattausch and Alberto {Jaspe Villanueva} and Enrico Gobbetti and Renato Pajarola},
  title = {Automatic Room Detection and Reconstruction in Cluttered Indoor Environments with Complex Room Layouts},
  journal = {Computers \& Graphics},
  publisher = pub-ELS,
  address = pub-ELS:adr,
  year = 2014,
  volume = {44},
  number = {},
  month = nov,
  pages = {20--32},
  doi = {10.1016/j.cag.2014.07.005},
  abstract = {We present a robust approach for reconstructing the main architectural structure of complex indoor environments given a set of cluttered 3D input range scans. Our method uses an efficient occlusion-aware process to extract planar patches as candidate walls, separating them from clutter and coping with missing data, and automatically extracts the individual rooms that compose the environment by applying a diffusion process on the space partitioning induced by the candidate walls. This diffusion process, which has a natural interpretation in terms of heat propagation, makes our method robust to artifacts and other imperfections that occur in typical scanned data of interiors. For each room, our algorithm reconstructs an accurate polyhedral model by applying methods from robust statistics. We demonstrate the validity of our approach by evaluating it on both synthetic models and real-world 3D scans of indoor environments.},
  url = {https://www.crs4.it/vic/data/papers/cag2014-indoor_architectural_reconstruction.pdf},
}

@Article{Bettio:2014:MES,
  idxkey = {TOP-THEME-ACQUISITION},
  idxproject = {DMP,DIVA,MONTEPRAMA1,MONTEPRAMA2},
  author = {Fabio Bettio and Alberto {Jaspe Villanueva} and Emilio Merella and Fabio Marton and Enrico Gobbetti and Ruggero Pintus},
  title = {{Mont'e Scan}: Effective Shape and Color Digitization of Cluttered {3D} Artworks},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  volume = 8,
  number = 1,
  year = 2015,
  pages = {4:1--4:23},
  doi = {10.1145/2644823},
  abstract = {We propose an approach for improving the digitization of shape and color of 3D artworks in a cluttered environment using 3D laser scanning and flash photography. In order to separate clutter from acquired material, semi-automated methods are employed to generate masks used to segment the range maps and the color photographs. This approach allows the removal of unwanted 3D and color data prior to the integration of acquired data in a 3D model. Sharp shadows generated by flash acquisition are easily handled by this masking process, and color deviations introduced by the flash light are corrected at the color blending step by taking into account the geometry of the object. The approach has been evaluated on a large scale acquisition campaign of the Mont'e Prama complex. This site contains an extraordinary collection of stone fragments from the Nuragic era, which depict small models of prehistoric nuraghe (cone-shaped stone towers), as well as larger-than-life archers, warriors, and boxers. The acquisition campaign has covered 37 statues mounted on metallic supports.
@Article{Bettio:2014:MES,
  idxkey = {TOP-THEME-ACQUISITION},
  idxproject = {DMP,DIVA,MONTEPRAMA1,MONTEPRAMA2},
  author = {Fabio Bettio and Alberto {Jaspe Villanueva} and Emilio Merella and Fabio Marton and Enrico Gobbetti and Ruggero Pintus},
  title = {{Mont'e Scan}: Effective Shape and Color Digitization of Cluttered {3D} Artworks},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  volume = 8,
  number = 1,
  year = 2015,
  pages = {4:1--4:23},
  doi = {10.1145/2644823},
  abstract = {We propose an approach for improving the digitization of shape and color of 3D artworks in a cluttered environment using 3D laser scanning and flash photography. In order to separate clutter from acquired material, semi-automated methods are employed to generate masks used to segment the range maps and the color photographs. This approach allows the removal of unwanted 3D and color data prior to the integration of acquired data in a 3D model. Sharp shadows generated by flash acquisition are easily handled by this masking process, and color deviations introduced by the flash light are corrected at the color blending step by taking into account the geometry of the object. The approach has been evaluated on a large-scale acquisition campaign of the Mont'e Prama complex. This site contains an extraordinary collection of stone fragments from the Nuragic era, which depict small models of prehistoric nuraghe (cone-shaped stone towers), as well as larger-than-life archers, warriors, and boxers. The acquisition campaign has covered 37 statues mounted on metallic supports. Color and shape were acquired at a resolution of 0.25mm, which resulted in over 6200 range maps (about 1.3G valid samples) and 3817 photographs.},
  url = {https://www.crs4.it/vic/data/papers/jocch2014-clutter_monteprama.pdf},
}

@InProceedings{Yang:2014:ASA,
  idxproject = {YDC2-1},
  author = {Ying Yang and Ruggero Pintus and Holly Rushmeier and Ioannis Ivrissimtzis},
  title = {A Steganalytic Algorithm for {3D} Polygonal Meshes},
  booktitle = {20th IEEE International Conference on Image Processing (ICIP)},
  organization = {IEEE},
  year = 2014,
  pages = {4782--4786},
  doi = {10.1109/ICIP.2014.7025969},
  abstract = {We propose a steganalytic algorithm for watermarks embedded by Cho et al.'s mean-based algorithm. The main observation is that while in a clean model the means of Cho et al.'s normalized histogram bins are expected to follow a Gaussian distribution, in a marked model their distribution will be bimodal. The proposed algorithm estimates the number of bins through an exhaustive search, and then the presence of a watermark is decided by a tailor-made normality test. We also propose a modification of Cho et al.'s algorithm which is more resistant to the steganalytic attack and offers an improved robustness/capacity trade-off.},
  url = {https://www.crs4.it/vic/data/papers/icip2014-steganalytic.pdf},
}

@Article{Pintus:2014:ATH,
  idxproject = {YDC2-1},
  author = {Ruggero Pintus and Ying Yang and Holly Rushmeier},
  title = {{ATHENA}: Automatic Text Height ExtractioN for the Analysis of text lines in old handwritten manuscripts},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  volume = {8},
  number = {1},
  pages = {1:1--1:25},
  doi = {10.1145/2659020},
  year = {2015},
  abstract = {Massive digital acquisition and preservation of deteriorating historical and artistic documents is of particular importance due to their value and fragile condition. The study and browsing of such digital libraries is invaluable for scholars in the Cultural Heritage field, but requires automatic tools for analyzing and indexing these datasets. We present two completely automatic methods requiring no human intervention: text height estimation and text line extraction. Our proposed methods have been evaluated on a huge heterogeneous corpus of illuminated medieval manuscripts of different writing styles and with various problematic attributes, such as holes, spots, ink bleed-through, ornamentation, background noise, and overlapping text lines. Our experimental results demonstrate that these two new methods are efficient and reliable, even when applied to very noisy and damaged old handwritten manuscripts.},
  url = {https://www.crs4.it/vic/data/papers/jocch2014-athena.pdf},
}
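%
% Editor's note: a classic projection-profile baseline for the text-line
% analysis task addressed by ATHENA above; this is far simpler than the
% paper's method and shown only to fix ideas (the threshold and banding
% logic are assumptions, not the authors' algorithm).
%
%   import numpy as np
%
%   def text_line_bands(page_gray, ink_threshold=128):
%       # Dark ink on light background: count ink pixels per image row,
%       # then group consecutive "inked" rows into candidate line bands.
%       ink = (page_gray < ink_threshold).sum(axis=1)
%       active = ink > 0.5 * ink.mean()
%       bands, start = [], None
%       for y, a in enumerate(active):
%           if a and start is None:
%               start = y
%           elif not a and start is not None:
%               bands.append((start, y)); start = None
%       if start is not None:
%           bands.append((start, len(active)))
%       return bands  # median band height gives a crude text-height estimate
%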
@InProceedings{Balsa:2014:HHM,
  idxproject = {DMP,DIVA,TADES,MONTEPRAMA2},
  author = {Marcos {Balsa Rodriguez} and Marco Agus and Fabio Marton and Enrico Gobbetti},
  title = {{HuMoRS}: Huge models Mobile Rendering System},
  abstract = {We present HuMoRS, a networked 3D graphics system for interactively streaming and exploring massive 3D mesh models on mobile devices. The system integrates a networked architecture for adaptive on-device rendering of multiresolution surfaces with a simple and effective interactive camera controller customized for touch-enabled mobile devices. During interaction, knowledge of the currently rendered scene is exploited to automatically center a rotation pivot and to propose context-dependent precomputed viewpoints. Both the object of interest and the viewpoint database reside on a web server, and adaptive transmission is demonstrated over wireless and phone connections in a Cultural Heritage application for the exploration of sub-millimetric colored reconstructions of stone statues. We also report on a preliminary user study comparing the performance of our camera navigation method with the most popular Virtual TrackBall implementations, with and without pivoting.},
  booktitle = {Proc. ACM Web3D International Symposium},
  pages = {7--16},
  month = aug,
  year = 2014,
  publisher = pub-ACM,
  address = pub-ACM:adr,
  url = {https://www.crs4.it/vic/data/papers/web3d2014-mobile-avt.pdf},
}

@Article{Pintore:2014:EMM,
  idxproject = {VASCO},
  author = {Giovanni Pintore and Enrico Gobbetti},
  title = {Effective Mobile Mapping of Multi-room Indoor Structures},
  journal = {The Visual Computer},
  volume = {30},
  number = {6--8},
  pages = {707--716},
  year = 2014,
  abstract = {We present a system to easily capture building interiors and automatically generate floor plans scaled to their metric dimensions. The proposed approach is able to manage scenes not necessarily limited to the Manhattan World assumption, exploiting the redundancy of the instruments commonly available on commodity smartphones, such as the accelerometer, magnetometer and camera. Without specialized training or equipment, our system can produce a 2D floor plan and a representative 3D model of the scene accurate enough to be used for simulations and interactive applications.},
  keywords = {},
  url = {https://www.crs4.it/vic/data/papers/tvc2014-mobile_mapping.pdf},
}

@Article{Marton:2014:IIV,
  idxkey = {TOP-THEME-UI},
  idxproject = {DMP,DIVA,MONTEPRAMA2},
  author = {Fabio Marton and Marcos {Balsa Rodriguez} and Fabio Bettio and Marco Agus and Alberto {Jaspe Villanueva} and Enrico Gobbetti},
  title = {{IsoCam}: Interactive Visual Exploration of Massive Cultural Heritage Models on Large Projection Setups},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  month = jun,
  year = 2014,
  volume = 7,
  number = 2,
  pages = {Article 12},
  abstract = {We introduce a novel user interface and system for exploring extremely detailed 3D models in a museum setting. 3D models and associated information are presented on a large projection surface controlled by a touch-enabled surface placed at a suitable distance in front of it. Our indirect user interface, dubbed IsoCam, combines an object-aware interactive camera controller with an interactive point-of-interest selector, and is implemented within a scalable architecture based on multiresolution structures shared between the rendering and user interaction subsystems. The collision-free camera controller automatically supports the smooth transition from orbiting to proximal navigation, by exploiting a distance-field representation of the 3D object. The point-of-interest selector exploits a specialized view similarity computation to propose a few nearby, easily reachable interesting 3D views from a large database, move the camera to the user-selected point of interest, and provide extra information through overlaid annotations of the target view. The capabilities of our approach have been demonstrated in a public event attended by thousands of people, who were offered the possibility to explore sub-millimetric reconstructions of 38 stone statues of the Mont'e Prama Nuragic complex, depicting larger-than-life human figures, and small models of prehistoric Nuraghe (cone-shaped stone towers).
A follow-up of this work, using 2.5m-high projection screens, is now included in permanent exhibitions at two archaeological museums. Results of a thorough user evaluation, involving quantitative and subjective measurements, are discussed.},
  url = {https://www.crs4.it/vic/data/papers/jocch2014-isocam.pdf},
}

@InProceedings{Mura:2014:RCI,
  idxproject = {DIVA},
  author = {Claudio Mura and Alberto {Jaspe Villanueva} and Oliver Mattausch and Enrico Gobbetti and Renato Pajarola},
  title = {Reconstructing Complex Indoor Environments with Arbitrary Wall Orientations},
  abstract = {Reconstructing the architectural shape of indoor environments is a problem that is gaining increasing attention in the field of computer graphics. While some solutions have been proposed in recent years, cluttered environments with multiple rooms and non-vertical walls still represent a challenging input for state-of-the-art methods. We propose an occlusion-aware pipeline that extends current solutions to work with complex environments with arbitrary wall orientations.},
  booktitle = {Proc. Eurographics Posters},
  month = apr,
  year = 2014,
  pages = {19--20},
  doi = {10.2312/egp.20141069},
  publisher = {Eurographics Association},
  url = {https://www.crs4.it/vic/data/papers/eg2014p-indoor_reconstruction.pdf},
}

@Article{DiBenedetto:2014:EEC,
  idxkey = {TOP-THEME-MASSIVE-MODELS,TOP-THEME-MOBILE},
  idxproject = {DIVA,VASCO},
  author = {Marco {Di Benedetto} and Fabio Ganovelli and Marcos {Balsa Rodriguez} and Alberto {Jaspe Villanueva} and Roberto Scopigno and Enrico Gobbetti},
  title = {{ExploreMaps}: Efficient Construction and Ubiquitous Exploration of Panoramic View Graphs of Complex {3D} Environments},
  journal = j-CG-FORUM,
  year = 2014,
  volume = 33,
  number = 2,
  pages = {459--468},
  doi = {10.1111/cgf.12334},
  abstract = {We introduce a novel efficient technique for automatically transforming a generic renderable 3D scene into a simple graph representation named ExploreMaps, where nodes are well-placed points of view, called probes, and arcs are smooth paths between neighboring probes. Each probe is associated with a panoramic image enriched with preferred viewing orientations, and each path with a panoramic video. Our GPU-accelerated unattended construction pipeline distributes probes so as to guarantee coverage of the scene while accounting for perceptual criteria before finding smooth, good-looking paths between neighboring probes. Images and videos are precomputed at construction time with off-line photorealistic rendering engines, providing a convincing 3D visualization beyond the limits of current real-time graphics techniques. At run-time, the graph is exploited both for creating automatic scene indexes and movie previews of complex scenes and for supporting interactive exploration through a low-DOF assisted navigation interface and the visual indexing of the scene provided by the selected viewpoints. Due to negligible CPU overhead and very limited use of GPU functionality, real-time performance is achieved on emerging web-based environments based on WebGL even on low-powered mobile devices.},
  note = {Proc.
Eurographics 2014},
  url = {https://www.crs4.it/vic/data/papers/eg2014-exploremaps.pdf},
}

@Article{Balsa:2014:SCG,
  idxkey = {TOP-THEME-VOLUMETRIC},
  idxproject = {DIVA},
  author = {Marcos {Balsa Rodriguez} and Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Maxim Makhinya and Fabio Marton and Renato Pajarola and Susanne Suter},
  title = {State-of-the-art in Compressed GPU-Based Direct Volume Rendering},
  journal = {Computer Graphics Forum},
  volume = {33},
  number = {6},
  month = sep,
  year = 2014,
  publisher = {Wiley},
  doi = {10.1111/cgf.12280},
  pages = {77--100},
  abstract = {Great advancements in commodity graphics hardware have favored GPU-based volume rendering as the main adopted solution for interactive exploration of rectilinear scalar volumes on commodity platforms. Nevertheless, long data transfer times and GPU memory size limitations are often the main limiting factors, especially for massive, time-varying or multi-volume visualization, as well as for networked visualization on the emerging mobile devices. To address this issue, a variety of level-of-detail data representations and compression techniques have been introduced. In order to improve capabilities and performance over the entire storage, distribution and rendering pipeline, the encoding/decoding process is typically highly asymmetric, and systems should ideally compress at data production time and decompress on demand at rendering time. Compression and level-of-detail pre-computation does not have to adhere to real-time constraints and can be performed off-line for high quality results. In contrast, adaptive real-time rendering from compressed representations requires fast, transient, and spatially independent decompression. In this report, we review the existing compressed GPU volume rendering approaches, covering sampling grid layouts, compact representation models, compression techniques, GPU rendering architectures and fast decoding techniques.},
  url = {https://www.crs4.it/vic/data/papers/cgf2014-star_compressed_gpu_dvr.pdf},
}

%################################
%### 2013
%################################

@InProceedings{Mura:2013:RRI,
  idxkey = {},
  idxproject = {DIVA},
  author = {Claudio Mura and Oliver Mattausch and Alberto {Jaspe Villanueva} and Enrico Gobbetti and Renato Pajarola},
  title = {Robust Reconstruction of Interior Building Structures with Multiple Rooms under Clutter and Occlusions},
  abstract = {We present a robust approach for reconstructing the architectural structure of complex indoor environments given a set of cluttered input scans. Our method first uses an efficient occlusion-aware process to extract planar patches as potential wall segments, separating them from clutter and coping with missing data. Using a diffusion process to further increase its robustness, our algorithm is able to reconstruct a clean architectural model from those potential wall segments. To our knowledge, this is the first indoor reconstruction method which goes beyond a binary classification and automatically recognizes different rooms as separate components. We demonstrate the validity of our approach by testing it on both synthetic models and real-world 3D scans of indoor environments.},
  booktitle = {Proc.
13th International Conference on Computer-Aided Design and Computer Graphics},
  pages = {52--59},
  month = nov,
  year = 2013,
  organization = {},
  doi = {10.1109/CADGraphics.2013.14},
  publisher = {IEEE},
  url = {https://www.crs4.it/vic/data/papers/cadcg2013-robust_indoor_reconstruction.pdf},
}

@InProceedings{Balsa:2013:CMSM,
  idxkey = {},
  idxproject = {DIVA},
  author = {Marcos {Balsa Rodr\'{i}guez} and Enrico Gobbetti and Fabio Marton and Alex Tinti},
  title = {Coarse-grained Multiresolution Structures for Mobile Exploration of Gigantic Surface Models},
  abstract = {We discuss our experience in creating scalable systems for distributing and rendering gigantic 3D surfaces on web environments and common handheld devices. Our methods are based on compressed streamable coarse-grained multiresolution structures. By combining CPU and GPU compression technology with our multiresolution data representation, we are able to incrementally transfer, locally store and render with unprecedented performance extremely detailed 3D mesh models on WebGL-enabled browsers, as well as on hardware-constrained mobile devices.},
  booktitle = {Proc. SIGGRAPH Asia Symposium on Mobile Graphics and Interactive Applications},
  pages = {4:1--4:6},
  month = nov,
  year = 2013,
  organization = {},
  publisher = {ACM},
  doi = {10.1145/2543651.2543669},
  url = {https://www.crs4.it/vic/data/papers/sigsmgia2013-coarse_grained_for_mobile.pdf},
  note = {}
}

@InProceedings{Bettio:2013:IDS,
  idxkey = {},
  idxproject = {DMP,DIVA,MONTEPRAMA1},
  author = {Fabio Bettio and Enrico Gobbetti and Emilio Merella and Ruggero Pintus},
  title = {Improving the digitization of shape and color of {3D} artworks in a cluttered environment},
  abstract = {We propose an approach for improving the digitization of shape and color of 3D artworks in a cluttered environment using 3D laser scanning and flash photography. In order to separate clutter from acquired material, semi-automated methods are employed to generate masks to segment the 2D range maps and the color photographs, removing unwanted 3D and color data prior to 3D integration. Sharp shadows generated by flash acquisition are trivially handled by this masking process, and color deviations introduced by the flash light are corrected at color blending time by taking into account the object geometry. The approach has been applied to, and evaluated on, a large-scale acquisition campaign of the Mont'e Prama complex, an extraordinary collection of stone fragments from the Nuragic era, depicting larger-than-life archers, warriors, and boxers, as well as small models of prehistoric nuraghe (cone-shaped stone towers). The acquisition campaign has covered 36 statues mounted on metallic supports, acquired at 0.25mm resolution, resulting in over 6200 range scans (over 1.3G valid samples) and 3426 10Mpixel photographs.},
  booktitle = {Proc. Digital Heritage},
  pages = {23--30},
  month = oct,
  year = 2013,
  organization = {},
  publisher = {},
  url = {https://www.crs4.it/vic/data/papers/dh2013-clutter.pdf},
  note = {Best Paper Award}
}
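%
% Editor's note: the geometry-aware flash-color correction mentioned above
% can be pictured with a toy per-sample model (inverse-square falloff plus
% Lambert cosine). The paper's actual correction happens at the color
% blending step with calibrated data; everything below is an
% assumption-laden illustration, not the authors' method.
%
%   import numpy as np
%
%   def flash_corrected_color(color, p, n, flash_pos, ref_dist=1.0):
%       # color: observed RGB at surface point p with unit normal n;
%       # flash_pos: 3D position of the flash at capture time.
%       v = flash_pos - p
%       d = np.linalg.norm(v)
%       cos_theta = max(float(np.dot(n, v / d)), 1e-3)  # avoid blow-up
%       attenuation = cos_theta * (ref_dist / d) ** 2
%       return np.clip(color / attenuation, 0, 255)
%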
@InProceedings{Agus:2013:CDS,
  idxkey = {},
  idxproject = {DIVA},
  author = {Marco Agus and Enrico Gobbetti and Alberto {Jaspe Villanueva} and Giovanni Pintore and Ruggero Pintus},
  title = {Automatic Geometric Calibration of Projector-based Light-field Displays},
  abstract = {Continuous multiview (light-field) projection-based displays employ an array of projectors, mirrors, and a selectively transmissive screen to produce a light field. By appropriately modeling the display geometry, the light beams can emulate the emission from physical objects at fixed spatial locations, providing multiple freely moving viewers the illusion of interacting with floating objects. This paper presents a novel calibration method for this class of displays using a single uncalibrated camera and four fiducial markers. Calibration starts from a simple parametric description of the display layout. First, individual projectors are calibrated through parametric optimization of an idealized pinhole model. Then, the overall display and projector parameterization is globally optimized. Finally, independently for each projector, remaining errors are corrected through a rational 2D warping function. The final parameters are available to rendering engines to quickly compute forward and backward projections. The technique is demonstrated in the calibration of a large-scale horizontal-parallax-only 35MPixel light field display.},
  booktitle = {Proc. Eurovis Short Papers},
  pages = {1--5},
  month = jun,
  year = 2013,
  organization = pub-EUROGRAPHICS,
  url = {https://www.crs4.it/vic/data/papers/eurovis2013s-holocalib.pdf},
}
%publisher = pub-EUROGRAPHICS:adr,

@InProceedings{Balsa:2013:CDS,
  idxkey = {TOP-THEME-MOBILE},
  idxproject = {DIVA},
  author = {Marcos {Balsa Rodriguez} and Enrico Gobbetti and Fabio Marton and Alex Tinti},
  title = {Compression-domain Seamless Multiresolution Visualization of Gigantic Meshes on Mobile Devices},
  abstract = {We present a software architecture for distributing and rendering gigantic 3D triangle meshes on common handheld devices. Our approach copes with strong bandwidth and hardware capability limitations by means of a compression-domain adaptive multiresolution rendering approach. The method uses a regular conformal hierarchy of tetrahedra to spatially partition the input 3D model and to arrange mesh fragments at different resolutions. We create compact GPU-friendly representations of these fragments by constructing cache-coherent strips that index locally quantized vertex data, exploiting the bounding tetrahedron to create a local barycentric parametrization of the geometry. For the first time, this approach supports local quantization in a fully adaptive seamless 3D mesh structure. For web distribution, further compression is obtained by exploiting local data coherence for entropy coding. At run-time, mobile viewer applications adaptively refine a local multiresolution model maintained in GPU memory by asynchronously loading the required fragments from a web server. CPU and GPU cooperate for decompression, and a shaded rendering of colored meshes is performed at interactive speed directly from an intermediate compact representation using only 8 bytes/vertex, therefore coping with both memory and bandwidth limitations. The quality and performance of the approach is demonstrated with the interactive exploration of gigatriangle-sized models on common mobile platforms.},
  booktitle = {Proc. ACM Web3D International Symposium},
  pages = {99--107},
  month = jun,
  year = 2013,
  publisher = pub-ACM,
  address = pub-ACM:adr,
  url = {https://www.crs4.it/vic/data/papers/web3d2013-ivbdiamond.pdf},
}
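%
% Editor's note: a sketch of the local quantization idea described above,
% storing a vertex as fixed-bit barycentric-style coordinates of its
% bounding tetrahedron; the bit budget and layout are illustrative
% assumptions, not the paper's encoding.
%
%   import numpy as np
%
%   def quantize_in_tet(p, tet, bits=12):
%       # tet: 4x3 array of tetrahedron vertices enclosing point p.
%       E = np.column_stack([tet[1] - tet[0], tet[2] - tet[0], tet[3] - tet[0]])
%       b = np.linalg.solve(E, p - tet[0])   # 3 local coordinates in [0, 1]
%       return np.round(b * (2**bits - 1)).astype(np.uint32)
%
%   def dequantize_in_tet(q, tet, bits=12):
%       b = q.astype(np.float64) / (2**bits - 1)
%       E = np.column_stack([tet[1] - tet[0], tet[2] - tet[0], tet[3] - tet[0]])
%       return tet[0] + E @ b
%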
@Proceedings{Marton:2013:ESP,
  editor = {Fabio Marton and Kenneth Moreland},
  title = {Eurographics Symposium on Parallel Graphics and Visualization, Girona, Spain, 2013. Proceedings},
  publisher = {Eurographics Association},
  year = {2013},
  isbn = {978-3-905674-45-3},
  thumbnail = {https://www.crs4.it/vic/data/papers/egpgv2013.jpg}
}

@InProceedings{Gobbetti:2013:CVC,
  idxproject = {},
  author = {Enrico Gobbetti},
  title = {CRS4 Visual Computing},
  booktitle = {Eurographics Lab Presentations},
  year = 2013,
  month = may,
  pages = {L03},
  abstract = {This short presentation illustrates Visual Computing activities at the CRS4 research center. Research activities span many areas of computer graphics and computer vision, the primary focus being the study and development of scalable technology for acquiring, creating, distributing and exploring massive models, as well as for integrating them in real-time interactive visual simulations and virtual environments, both in local and distributed settings.},
  url = {https://www.crs4.it/vic/data/papers/eg2013-crs4viclab.pdf},
}

@InProceedings{Balsa:2013:SCG,
  idxproject = {DIVA},
  author = {Marcos {Balsa Rodriguez} and Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Maxim Makhinya and Fabio Marton and Renato Pajarola and Susanne Suter},
  title = {A Survey of Compressed GPU-based Direct Volume Rendering},
  booktitle = {Eurographics State-of-the-art Report},
  year = 2013,
  month = may,
  pages = {117--136},
  abstract = {Great advancements in commodity graphics hardware have favored GPU-based volume rendering as the main adopted solution for interactive exploration of rectilinear scalar volumes on commodity platforms. Nevertheless, long data transfer times and GPU memory size limitations are often the main limiting factors, especially for massive, time-varying or multi-volume visualization, or for networked visualization on the emerging mobile devices. To address this issue, a variety of level-of-detail data representations and compression techniques have been introduced. In order to improve capabilities and performance over the entire storage, distribution and rendering pipeline, the encoding/decoding process is typically highly asymmetric, and systems should ideally compress at data production time and decompress on demand at rendering time. Compression and level-of-detail pre-computation does not have to adhere to real-time constraints and can be performed off-line for high quality results. In contrast, adaptive real-time rendering from compressed representations requires fast, transient, and spatially independent decompression. In this report, we review the existing compressed GPU volume rendering approaches, covering compact representation models, compression techniques, GPU rendering architectures and fast decoding techniques.},
  url = {https://www.crs4.it/vic/data/papers/eg2013-star-compressed-gpu-dvr.pdf},
}

@Article{Goswami:2013:EMF,
  idxproject = {DIVA},
  author = {Prashant Goswami and Fatih Erol and Rahul Mukhi and Renato Pajarola and Enrico Gobbetti},
  title = {An Efficient Multi-resolution Framework for High Quality Interactive Rendering of Massive Point Clouds using Multi-way kd-Trees},
  journal = {The Visual Computer},
  volume = {29},
  number = {1},
  pages = {69--83},
  year = 2013,
  abstract = {We present an efficient technique for out-of-core multi-resolution construction and high quality interactive visualization of massive point clouds. Our approach introduces a novel hierarchical level of detail (LOD) organization based on multi-way kd-trees, which simplifies memory management and allows control over the LOD-tree height.
The LOD tree, constructed bottom-up using a fast high-quality point simplification method, is fully balanced, with all nodes uniformly sized. To this end, we introduce and analyze three efficient point simplification approaches that yield a desired number of high-quality output points. For constant rendering performance, we propose an efficient rendering-on-a-budget method with asynchronous data loading, which delivers fully continuous high quality rendering through LOD geo-morphing and deferred blending. Our algorithm is incorporated in a full end-to-end rendering system, which supports both local rendering and cluster-parallel distributed rendering. The method is evaluated on complex models made of hundreds of millions of point samples.},
  keywords = {Point-based rendering; Level-of-detail; Multi-way kd-tree; Entropy-based reduction; k-clustering; Parallel rendering; Geo-morphing},
  url = {https://www.crs4.it/vic/data/papers/tvc2013-mwkdtrees.pdf},
  issn = {0178-2789},
  doi = {10.1007/s00371-012-0675-2},
}
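%
% Editor's note: a compact sketch of the multi-way kd-tree construction
% described above: equal-count splits along the widest axis, so that all
% leaves end up uniformly sized and the tree height is controlled by the
% fanout. Parameter values are illustrative assumptions.
%
%   import numpy as np
%
%   def build_multiway_kdtree(points, fanout=4, leaf_size=4096):
%       if len(points) <= leaf_size:
%           return {"leaf": points}
%       extent = points.max(axis=0) - points.min(axis=0)
%       axis = int(np.argmax(extent))                 # split the widest axis
%       order = np.argsort(points[:, axis])
%       slabs = np.array_split(points[order], fanout) # equal-count slabs
%       return {"axis": axis,
%               "children": [build_multiway_kdtree(s, fanout, leaf_size)
%                            for s in slabs]}
%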
%################################
%### 2012
%################################

@InProceedings{Balsa:2012:IEG,
  idxproject = {INDIGO, DIVA, DISTRICT-LAB3D},
  author = {Marcos {Balsa Rodriguez} and Enrico Gobbetti and Fabio Marton and Ruggero Pintus and Giovanni Pintore and Alex Tinti},
  title = {Interactive exploration of gigantic point clouds on mobile devices},
  booktitle = {The 14th International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  year = 2012,
  month = nov,
  pages = {57--64},
  abstract = {New embedded CPUs that sport powerful graphics chipsets have the potential to make complex 3D applications feasible on mobile devices. In this paper, we present a scalable architecture and its implementation for mobile exploration of large point clouds, which are nowadays ubiquitous in the cultural heritage domain thanks to the increased performance and availability of 3D scanning techniques. The quality and performance of our approach is demonstrated on gigantic point clouds, interactively explored on Apple iPad and iPhone devices in a variety of network settings. Applications of the technology include on-site exploration during scanning campaigns and promotion of cultural heritage artifacts.},
  url = {https://www.crs4.it/vic/data/papers/vast2012-mobile.pdf},
}

@Article{Marton:2012:NE3,
  idxkey = {TOP-THEME-UI, TCR, MR},
  idxproject = {DIVA},
  author = {Fabio Marton and Marco Agus and Enrico Gobbetti and Giovanni Pintore and Marcos {Balsa Rodriguez}},
  title = {Natural exploration of {3D} massive models on large-scale light field displays using the {FOX} proximal navigation technique},
  journal = {Computers \& Graphics},
  publisher = pub-ELS,
  address = pub-ELS:adr,
  year = 2012,
  volume = {36},
  number = {8},
  month = dec,
  pages = {893--903},
  abstract = {We report on a virtual environment for natural immersive exploration of extremely detailed surface models on multi-projector light field displays, which give multiple, freely moving, naked-eye viewers the illusion of seeing and manipulating 3D objects with continuous horizontal parallax. Our specialized 3D user interface, dubbed FOX (Focus Sliding Surface), allows inexperienced users to inspect 3D objects at various scales, integrating panning, rotating, and zooming controls into a single low-degree-of-freedom operation. At the same time, FOX takes into account the requirements for comfortable viewing on the light field display hardware, which has a limited field-of-view and a variable spatial resolution. Specialized multi-resolution structures, embedding a fine-grained, per-patch spatial index within a coarse-grained patch-based mesh structure, are exploited for fast batched I/O, GPU-accelerated rendering, and user-interaction-system-related geometric queries. The capabilities of the system are demonstrated by the interactive inspection of a giga-triangle dataset on a large-scale, 35 MPixel light field display controlled by wired or vision-based devices. Results of a thorough user evaluation, involving quantitative and subjective measurements, are discussed.},
  url = {https://www.crs4.it/vic/data/papers/cag2012-fox.pdf},
}

@InProceedings{Gobbetti:2012:3NI,
  idxkey = {TOP-THEME-MOBILE},
  idxproject = {DIVA},
  author = {Enrico Gobbetti and Fabio Marton and Marcos {Balsa Rodriguez} and Fabio Ganovelli and Marco {Di Benedetto}},
  title = {Adaptive Quad Patches: an Adaptive Regular Structure for Web Distribution and Adaptive Rendering of {3D} Models},
  abstract = {We introduce an approach for efficient distribution and adaptive rendering of 3D mesh models supporting a simple quad parameterization. Our method extends and combines recent results in geometric processing, real-time rendering, and web programming. In particular: we exploit recent results on surface reconstruction and isometric parametrization to transform point clouds into two-manifold meshes whose parametrization domain is a small collection of 2D square regions; we encode the resulting parameterized meshes into a very compact multiresolution structure composed of variable resolution quad patches whose geometry and texture are stored in a tightly packed texture atlas; we adaptively stream and render variable resolution shape representations using a GPU-accelerated adaptive tessellation algorithm with negligible CPU overhead. Real-time performance is achieved on portable GPU platforms using OpenGL, as well as on emerging web-based environments based on WebGL. Promising applications of the technology range from the automatic creation of rapidly renderable objects for games to the set-up of browsable 3D model repositories on the web that will be accessible by the upcoming generation of WebGL-enabled web browsers.},
  booktitle = {Proc. ACM Web3D International Symposium},
  pages = {9--16},
  month = aug,
  year = 2012,
  publisher = pub-ACM,
  address = pub-ACM:adr,
  url = {https://www.crs4.it/vic/data/papers/web3d2012-aqp.pdf},
  note = {(Best Long Paper Award)}
}
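%
% Editor's note: a one-function sketch of how a quad patch in a scheme like
% the one above might pick its power-of-two tessellation level from its
% projected screen size; the tolerance-driven rule is an assumption for
% illustration, not the paper's criterion.
%
%   import math
%
%   def patch_tess_level(patch_extent_px, tolerance_px=1.0, max_level=8):
%       # Choose a 2^level x 2^level grid so cells project to roughly
%       # tolerance_px pixels or less.
%       level = math.ceil(math.log2(max(patch_extent_px / tolerance_px, 1.0)))
%       return min(max(level, 0), max_level)
%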
@InProceedings{Pintore:2012:3NI,
  idxkey = {},
  idxproject = {INDIGO},
  author = {Giovanni Pintore and Enrico Gobbetti and Fabio Ganovelli and Paolo Brivio},
  title = {{3DNSITE}: A networked interactive {3D} visualization system to simplify location recognition in crisis management},
  abstract = {We report on the 3DNSITE system, a web-based client-server 3D visualization tool for streaming and visualizing large three-dimensional hybrid data (georeferenced point clouds and photographs with associated viewpoints and camera parameters). The system is motivated by the need to simplify data acquisition and location recognition for crisis managers and first responders during emergency operations or training sessions. In this peculiar context, it is very important to easily share 3D environment data among people in a distributed environment, accessing huge 3D models with embedded photographs on devices with heterogeneous hardware capabilities and interconnected on different network types. Moreover, since the specific end-users are not necessarily skilled in virtual reality and 3D object interaction, the navigation interface must be simple and intuitive. Taking into account these constraints, we propose a mixed object-based/image-based system, which enhances the current state-of-the-art by exploiting a multi-resolution representation for the 3D model and a multi-level cache system for both the images and the 3D model structure. A novel low-degree-of-freedom user interface is presented to navigate the scenario with touchscreen devices. The proposed implementation, included in a more general training and decision framework for emergency operations, is evaluated on real-world datasets.},
  booktitle = {Proc. ACM Web3D International Symposium},
  pages = {59--67},
  month = aug,
  year = 2012,
  publisher = pub-ACM,
  address = pub-ACM:adr,
  url = {https://www.crs4.it/vic/data/papers/web3d2012-3dnsite.pdf},
}

@Proceedings{Childs:2012:ESP,
  idxkey = {},
  idxproject = {DIVA},
  editor = {Hank Childs and Torsten Kuhlen and Fabio Marton},
  title = {Eurographics Symposium on Parallel Graphics and Visualization, EGPGV 2012, Cagliari, Italy, May 13-14, 2012. Proceedings},
  publisher = {Eurographics Association},
  year = {2012},
  isbn = {978-3-905674-35-4},
  thumbnail = {https://www.crs4.it/vic/data/papers/egpgv2012.jpg}
}

@InCollection{Vazquez:2012:PVR,
  idxkey = {},
  idxproject = {DIVA},
  title = {Practical Volume Rendering in mobile devices},
  author = {{Pere Pau} {Vazquez Alcocer} and Marcos {Balsa Rodriguez}},
  booktitle = {Proc. International Symposium on Visual Computing},
  series = {Lecture Notes in Computer Science (LNCS)},
  volume = {7431},
  publisher = {Springer Verlag},
  year = {2012},
  pages = {708--718},
  abstract = {Volume rendering has been a relevant topic in scientific visualization for the last two decades. A decade ago, the exploration of reasonably big volume datasets required costly workstations due to the high processing cost of this kind of visualization. In the last few years, a high-end PC or laptop was enough to handle medium-sized datasets, thanks especially to the fast evolution of GPU hardware. New embedded CPUs that sport powerful graphics chipsets make complex 3D applications feasible on such devices. However, despite the heavily marketed presentations and all the hype, little empirical data is usually available to make comparisons of absolute and relative capabilities possible. In this paper we analyze current graphics hardware in most high-end Android mobile devices and perform a practical comparison on a well-known GPU-intensive task: volume rendering. We analyze different aspects by implementing three different classical algorithms and show how current state-of-the-art mobile GPUs behave in volume rendering.
},
  url = {https://www.crs4.it/vic/data/papers/isvc2012-pvr.pdf},
  doi = {10.1007/978-3-642-33179-4_67},
}

@Article{Gobbetti:2012:CCO,
  idxkey = {TOP-THEME-VOLUMETRIC},
  idxproject = {DIVA},
  author = {Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Fabio Marton},
  title = {COVRA: A compression-domain output-sensitive volume rendering architecture based on a sparse representation of voxel blocks},
  journal = {Computer Graphics Forum},
  volume = {31},
  number = {3pt4},
  year = 2012,
  publisher = {Blackwell Publishing Ltd},
  issn = {1467-8659},
  doi = {10.1111/j.1467-8659.2012.03124.x},
  pages = {1315--1324},
  abstract = {We present a novel multiresolution compression-domain GPU volume rendering architecture designed for interactive local and networked exploration of rectilinear scalar volumes on commodity platforms. In our approach, the volume is decomposed into a multiresolution hierarchy of bricks. Each brick is further subdivided into smaller blocks, which are compactly described by sparse linear combinations of prototype blocks stored in an overcomplete dictionary. The dictionary is learned, using limited computational and memory resources, by applying the K-SVD algorithm to a re-weighted non-uniformly sampled subset of the input volume, harnessing the recently introduced method of coresets. The result is a scalable high quality coding scheme, which allows very large volumes to be compressed off-line and then decompressed on-demand during real-time GPU-accelerated rendering. Volumetric information can be maintained in compressed format through all the rendering pipeline. In order to efficiently support high quality filtering and shading, a specialized real-time renderer closely coordinates decompression with rendering, combining at each frame images produced by raycasting selectively decompressed portions of the current view- and transfer-function-dependent working set. The quality and performance of our approach is demonstrated on massive static and time-varying datasets.},
  note = {Proc. Eurovis 2012},
  url = {https://www.crs4.it/vic/data/papers/eurovis2012-covra.pdf},
}
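%
% Editor's note: the decode step of a sparse-coded voxel block, as in the
% COVRA entry above: a block is a sparse linear combination of dictionary
% atoms, so reconstruction touches only a handful of dictionary columns.
% Shapes and sparsity below are illustrative assumptions.
%
%   import numpy as np
%
%   def decode_block(D, atom_idx, coeffs, block_shape=(8, 8, 8)):
%       # D: (voxels_per_block, n_atoms) dictionary learned off-line (K-SVD);
%       # atom_idx/coeffs: the few nonzero entries for this block.
%       return (D[:, atom_idx] @ coeffs).reshape(block_shape)
%
% With, say, an 8x8x8 block and 4 nonzero coefficients, only 4 columns of
% the dictionary are read, which is what makes transient per-frame
% decompression during raycasting affordable.
%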
@InProceedings{Calderone:2012:ALP,
  idxkey = {},
  idxproject = {DISTRICT-LAB3D},
  author = {Manuela Calderone and Andrea Cereatti and Emilio Merella and Ugo Della Croce},
  title = {Anatomical landmarks position estimation in incomplete {3D} humerus models},
  abstract = {A general method for estimating the location of anatomical landmarks on incomplete 3D bone models was proposed in order to use standardized anatomical frame conventions. An estimate of the location of missing anatomical landmarks was obtained by matching the incomplete bone model under analysis to a template of a complete bone model on which anatomical landmarks had been previously identified. The methodology was tested on three humeri. Results have shown that while the method provided reliable results when the models of the bone portion and of the complete bone belonged to the same subject, errors increased considerably (up to 19 deg) when bone meshes from different subjects were used.},
  booktitle = {Proc. GNB},
  pages = {},
  month = jun,
  year = 2012,
  url = {https://www.crs4.it/vic/data/papers/gnb2012-anatomical.pdf},
}

@InCollection{Pintus:2012:PPS,
  idxkey = {},
  idxproject = {DISTRICT-LAB3D},
  title = {Acquisizione digitale multi-sensore di siti archeologici: il caso di Montessu},
  author = {Ruggero Pintus and Enrico Gobbetti and Giuseppa Tanda and Massimo Vanzi},
  booktitle = {La Preistoria e la protostoria della Sardegna, Atti della XLIV Riunione Scientifica dell'Istituto Italiano di Preistoria e Protostoria, Cagliari-Barumini-Sassari 23-28 novembre 2009},
  series = {},
  volume = {3},
  publisher = {IIPP - Istituto Italiano di Preistoria e Protostoria},
  year = {2012},
  pages = {963--968},
  abstract = {Nowadays, 3D acquisition devices allow us to rapidly capture an object's geometry and color. State-of-the-art techniques, such as those developed at CRS4, are able to semi-automatically transform acquired data into 3D digital representations of Cultural Heritage artifacts characterized by very high accuracy and a wealth of details. This case study illustrates how these techniques are applied to the digital documentation of elements of the Montessu Necropolis.},
  url = {https://www.crs4.it/vic/data/papers/iipp2012-multisensor.pdf},
  isbn = {978-88-6045-094-4},
}

@InProceedings{Himmelstein:2012:IST,
  idxkey = {},
  idxproject = {INDIGO},
  title = {Interactive Simulation Technology for Crisis Management and Training: The INDIGO Project},
  author = {Jesse Himmelstein and Alexandre Ahmad and Olivier Balet and Jean-Baptiste {de la Rivi\`ere} and Maaike Schaap and Werner Overdijk and Enrico Gobbetti and Giovanni Pintore and Fabio Ganovelli and Paolo Brivio},
  abstract = {To face the urgent need to train strategic and operational managers in dealing with complex crises, we are researching and developing an innovative decision support system to be used for crisis management and interactive crisis training. This paper provides an overview of current decision-support systems, simulation software and other technologies specifically designed to serve crisis managers. These findings inform the design of a new interactive simulation technology system, where a 3D Common Operational Picture (COP) is shared between a tactile digital whiteboard in the command center and mobile devices in the field. This paper presents this innovative system for crisis management and training.},
  month = apr,
  year = 2012,
  pages = {144},
  isbn = {978-0-86491-332-6},
  booktitle = {9th International Conference for Crisis Response and Management (ISCRAM)},
  address = {Conference held in Vancouver, BC, Canada},
  url = {https://www.crs4.it/vic/data/papers/iscram2012-indigo.pdf},
}

%################################
%### 2011
%################################

@InProceedings{Agus:2011:VEI,
  idxkey = {},
  idxproject = {FIXME},
  title = {Visual enhancements for improved interactive rendering on light field displays},
  author = {Marco Agus and Giovanni Pintore and Fabio Marton and Enrico Gobbetti and Antonio Zorcolo},
  abstract = {Rendering of complex scenes on a projector-based light field display requires 3D content adaptation in order to provide comfortable viewing experiences in all conditions. In this paper we report on our approach to improve visual experiences while coping with the limitations in the effective depth of field and the angular field of view of the light field display.
We present adaptation methods employing non-linear depth mapping and depth-of-field simulation, which leave large parts of the scene unmodified while modifying the other parts in a non-intrusive way. The methods are integrated in an interactive visualization system for the inspection of massive models on a large-scale 35MPixel light field display. Preliminary results of a subjective evaluation demonstrate that our rendering adaptation techniques improve visual comfort without affecting the overall depth perception.},
  month = nov,
  year = 2011,
  pages = {1--7},
  booktitle = {Eurographics Italian Chapter Conference},
  address = {Conference held in Salerno, Italy},
  publisher = pub-EUROGRAPHICS,
  url = {https://www.crs4.it/vic/data/papers/egit2011-holo.pdf},
  isbn = {978-3-905673-88-3},
}

@InProceedings{Marton:2011:NEM,
  idxproject = {FIXME},
  author = {Fabio Marton and Marco Agus and Giovanni Pintore and Enrico Gobbetti},
  title = {{FOX}: The {Focus Sliding Surface} Metaphor for Natural Exploration of Massive Models on Large-scale Light Field Displays},
  booktitle = {Proc. VRCAI},
  year = 2011,
  month = dec,
  pages = {83--90},
  abstract = {We report on a virtual environment for natural immersive exploration of extremely detailed surface models on light field displays. Our specialized 3D user interface allows casual users to inspect 3D objects at various scales, integrating panning, rotating, and zooming controls into a single low-degree-of-freedom operation, while taking into account the requirements for comfortable viewing on light field display hardware. Specialized multiresolution structures, embedding a fine-grained per-patch spatial index within a coarse-grained patch-based mesh structure, are exploited for fast batched I/O, GPU-accelerated rendering, and user-interaction-system-related geometric queries. The capabilities of the system are demonstrated by the interactive inspection of a giga-triangle dataset on a large-scale 35MPixel light field display controlled by wired or vision-based devices.},
  url = {https://www.crs4.it/vic/data/papers/vrcai2011-fox.pdf},
  doi = {10.1145/2087756.2087767}
}

@InProceedings{Pintus:2011:RRM,
  idxproject = {INDIGO, DISTRICT-LAB3D},
  author = {Ruggero Pintus and Enrico Gobbetti and Marco Agus},
  title = {Real-time Rendering of Massive Unstructured Raw Point Clouds using Screen-space Operators},
  booktitle = {The 12th International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  year = 2011,
  month = oct,
  pages = {105--112},
  abstract = {Nowadays, 3D acquisition devices allow us to capture the geometry of huge Cultural Heritage (CH) sites, historical buildings and urban environments. We present a scalable real-time method to render this kind of model without requiring lengthy preprocessing. The method does not make any assumptions about sampling density or availability of normal vectors for the points. On a frame-by-frame basis, our GPU-accelerated renderer computes point cloud visibility, fills and filters the sparse depth map to generate a continuous surface representation of the point cloud, and provides a screen-space shading term to effectively convey shape features. The technique is applicable to all rendering pipelines capable of projecting points to the frame buffer. To deal with extremely massive models, we integrate it within a multi-resolution out-of-core real-time rendering framework with small pre-computation times. Its effectiveness is demonstrated on a series of massive unstructured real-world Cultural Heritage datasets.
The small precomputation times and the low memory requirements make the method suitable for quick onsite visualizations during scan campaigns.},
  url = {https://www.crs4.it/vic/data/papers/vast2011-pbr.pdf},
}

@InProceedings{Pintus:2011:FRS,
  idxproject = {INDIGO, DISTRICT-LAB3D},
  author = {Ruggero Pintus and Enrico Gobbetti and Roberto Combet},
  title = {Fast and Robust Semi-Automatic Registration of Photographs to {3D} Geometry},
  booktitle = {The 12th International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  year = 2011,
  month = oct,
  pages = {9--16},
  abstract = {We present a simple, fast and robust technique for semi-automatic 2D-3D registration, capable of aligning a large set of unordered images to a massive point cloud with minimal human effort. Our method converts the hard-to-solve image-to-geometry registration problem into a Structure-from-Motion (SfM) problem plus a 3D-3D registration problem. We exploit a SfM framework that, starting just from the unordered image collection, computes an estimate of camera parameters and a sparse 3D geometry derived from matched image features. We then coarsely register this model to the given 3D geometry by estimating a global scale and absolute orientation using minimal manual intervention. A specialized sparse bundle adjustment (SBA) step, exploiting the correspondence between the model derived from image features and the fine input 3D geometry, is then used to refine the intrinsic and extrinsic parameters of each camera. The output data is suitable for photo blending frameworks to produce seamless colored models. The effectiveness of the method is demonstrated on a series of real-world 3D/2D Cultural Heritage datasets.},
  url = {https://www.crs4.it/vic/data/papers/vast2011-photo-to-geometry-registration.pdf},
}
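%
% Editor's note: the coarse 3D-3D step above estimates a global scale and
% absolute orientation. A standard closed-form similarity fit
% (Umeyama-style, via SVD) is sketched below as one plausible realization
% under the assumption of a few picked correspondences; it is not the
% paper's code.
%
%   import numpy as np
%
%   def similarity_align(src, dst):
%       # Least-squares s, R, t with dst_i ~ s * R @ src_i + t,
%       # from >= 3 corresponding points (rows of src and dst).
%       mu_s, mu_d = src.mean(axis=0), dst.mean(axis=0)
%       S, Dc = src - mu_s, dst - mu_d
%       U, sig, Vt = np.linalg.svd(Dc.T @ S / len(src))
%       C = np.eye(3)
%       if np.linalg.det(U @ Vt) < 0:
%           C[2, 2] = -1.0                    # guard against reflections
%       R = U @ C @ Vt
%       s = np.trace(np.diag(sig) @ C) / S.var(axis=0).sum()
%       return s, R, mu_d - s * R @ mu_s
%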
@InProceedings{Gobbetti:2011:REM,
  idxproject = {V-CITY},
  author = {Enrico Gobbetti and Fabio Marton and Marco {Di Benedetto} and Fabio Ganovelli and Matthias Buehler and Simon Schubiger and Matthias Specht and Chris Engels and Luc Van Gool},
  title = {Reconstructing and Exploring Massive Detailed Cityscapes},
  booktitle = {The 12th International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  year = 2011,
  month = oct,
  pages = {1--8},
  abstract = {We present a state-of-the-art system for obtaining and exploring large-scale three-dimensional models of urban landscapes. A multimodal approach to reconstruction fuses cadastral information, laser range data, and oblique imagery into building models, which are then refined by applying procedural rules for replacing textures with 3D elements, such as windows and doors, therefore enhancing the model quality and adding semantics to the model. For city-scale exploration, these detailed models are uploaded to a web-based service, which automatically constructs an approximate scalable multiresolution representation. This representation can be interactively transmitted and visualized over the net to clients ranging from graphics PCs to web-enabled portable devices. The approach's characteristics and performance are illustrated using real-world city-scale data.},
  isbn = {978-3-905674-34-7},
  issn = {1811-864X},
  url = {https://www.crs4.it/vic/data/papers/vast2011-vcity-pipeline.pdf},
}

@InProceedings{Himmelstein:2011:TVP,
  idxproject = {V-CITY},
  author = {Jesse Himmelstein and Olivier Balet and Fabio Ganovelli and Enrico Gobbetti and Matthias Specht and Pascal Mueller and Chris Engels and Luc van Gool and Jean-Baptiste {de la Rivi\`ere} and Armando Cavazzini},
  title = {The V-City Project},
  booktitle = {The 12th International Symposium on Virtual Reality, Archaeology and Cultural Heritage},
  year = 2011,
  month = oct,
  pages = {57--60},
  abstract = {3D geoinformatics have entered the digital age, hesitantly in some areas, and rampantly in others. Google Earth and Microsoft Virtual Earth are household names. However, these projects are limited to textured 3D landscapes, aerial 2D images and a few boxy building envelopes. The V-City project is a European research initiative to surpass these limitations, and create a system for intuitively exploring large urban areas with a high degree of detail. Bringing together technologies from geoinformatics, virtual reality, computer graphics, and computer vision, the system constructs detailed 3D city models from geopositioned aerial images and building footprints. For networked browsing, city models are compressed and streamed for interactive viewing of entire landscapes. A unique tactile table has also been developed to let multiple users visualize the same city model in stereo 3D, and interact with it simultaneously using hand gestures.},
  isbn = {978-3-905673-86-9},
  url = {https://www.crs4.it/vic/data/papers/vast2011-vcity-project.pdf},
}

@Article{Suter:2011:IMT,
  idxkey = {TOP-THEME-VOLUMETRIC},
  idxproject = {},
  author = {{Susanne K.} Suter and {Jos\'e Antonio} {Iglesias Guiti\'an} and Fabio Marton and Marco Agus and Andreas Elsener and {Christoph P.E.} Zollikofer and M. Gopi and Enrico Gobbetti and Renato Pajarola},
  title = {Interactive Multiscale Tensor Reconstruction for Multiresolution Volume Visualization},
  journal = {IEEE Transactions on Visualization and Computer Graphics},
  year = 2011,
  pages = {2135--2143},
  abstract = {Large scale and structurally complex volume datasets from high-resolution 3D imaging devices or computational simulations pose a number of technical challenges for interactive visual analysis. In this paper, we present the first integration of a multiscale volume representation based on tensor approximation within a GPU-accelerated out-of-core multiresolution rendering framework. Specific contributions include (a) a hierarchical brick-tensor decomposition approach for pre-processing large volume data, (b) a GPU-accelerated tensor reconstruction implementation exploiting CUDA capabilities, and (c) an effective tensor-specific quantization strategy for reducing data transfer bandwidth and out-of-core memory footprint. Our multiscale representation allows for the extraction, analysis and display of structural features at variable spatial scales, while adaptive level-of-detail rendering methods make it possible to interactively explore large datasets within a constrained memory footprint. The quality and performance of our prototype system is evaluated on large structurally complex datasets, including gigabyte-sized micro-tomographic volumes.},
  note = {Proc.
IEEE Visualization},
  url = {https://www.crs4.it/vic/data/papers/ieeeviz2011-ta.pdf},
}

@Article{Pintus:2011:FLS,
  idxproject = {INDIGO},
  author = {Ruggero Pintus and Enrico Gobbetti and Marco Callieri},
  title = {Fast Low-Memory Seamless Photo Blending on Massive Point Clouds using a Streaming Framework},
  journal = {ACM Journal on Computing and Cultural Heritage (JOCCH)},
  year = 2011,
  volume = 4,
  number = 2,
  pages = {Article 6},
  abstract = {We present an efficient scalable streaming technique for mapping highly detailed color information on extremely dense point clouds. Our method does not require meshing or extensive processing of the input model, works on a coarsely spatially-reordered point stream, and can adaptively refine point cloud geometry on the basis of image content. Seamless multi-band image blending is obtained by using GPU-accelerated screen-space operators, which solve point set visibility, compute a per-pixel view-dependent weight, and ensure a smooth weighting function over each input image. The proposed approach works independently on each image in a memory coherent manner, and can be easily extended to include further image quality estimators. The effectiveness of the method is demonstrated on a series of massive real-world point datasets.},
  url = {https://www.crs4.it/vic/data/papers/jocch2011-streaming-photo-blending.pdf},
}

@InProceedings{Marton:2011:RCM,
  idxproject = {},
  author = {Fabio Marton and Enrico Gobbetti and Fabio Bettio and {Jos\'e Antonio} {Iglesias Guiti\'an} and Ruggero Pintus},
  title = {A Real-time coarse-to-fine multiview capture system for all-in-focus rendering on a light-field display},
  booktitle = {Proc. 3DTV Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON)},
  year = 2011,
  abstract = {We present an end-to-end system capable of capturing and displaying, in real time, high-quality 3D video content on a cluster-driven multiprojector light-field display with full horizontal parallax. The capture component is an array of low-cost USB cameras connected to a single PC. Raw M-JPEG data coming from the software-synchronized cameras are multicast over Gigabit Ethernet to the back-end nodes of the rendering cluster, where they are decompressed and rendered. For all-in-focus rendering, view-dependent depth is estimated on the GPU using a customized multiview space-sweeping approach based on fast Census-based area matching implemented in CUDA. Real-time performance is demonstrated on a system with 18 cameras and 72 rendering projectors.},
  pages = {1--4},
  url = {https://www.crs4.it/vic/data/papers/3dtv2011-holorecon.pdf},
  issn = {2161-2021},
  doi = {10.1109/3DTV.2011.5877176},
}
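%
% Editor's note: the depth estimator above uses Census-based area matching
% inside a space sweep. Below, a tiny 5x5 census transform and Hamming
% matching cost, written for clarity; the paper's CUDA version is organized
% differently, and window size and layout here are assumptions.
%
%   import numpy as np
%
%   def census5(img):
%       # 24-bit signature per pixel: sign of (neighbor - center) in 5x5.
%       h, w = img.shape
%       c = img[2:h-2, 2:w-2]
%       sig = np.zeros(c.shape, dtype=np.uint32)
%       bit = 0
%       for dy in range(5):
%           for dx in range(5):
%               if (dy, dx) == (2, 2):
%                   continue
%               sig |= (img[dy:dy+h-4, dx:dx+w-4] > c).astype(np.uint32) << bit
%               bit += 1
%       return sig
%
%   def hamming_cost(a, b):
%       # Matching cost = number of differing census bits per pixel.
%       x = (a ^ b).reshape(-1)
%       bits = np.unpackbits(x.view(np.uint8).reshape(-1, 4), axis=1)
%       return bits.sum(axis=1).reshape(a.shape)
%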
@InProceedings{Pintus:2011:SFD,
  idxproject = {INDIGO},
  author = {Ruggero Pintus and Enrico Gobbetti and Marco Callieri},
  title = {A Streaming Framework for Seamless Detailed Photo Blending on Massive Point Clouds},
  booktitle = {Proc. Eurographics Area Papers},
  year = 2011,
  abstract = {We present an efficient scalable streaming technique for mapping highly detailed color information on extremely dense point clouds. Our method does not require meshing or extensive processing of the input model, works on a coarsely spatially-reordered point stream, and can adaptively refine point cloud geometry on the basis of image content. Seamless multi-band image blending is obtained by using GPU-accelerated screen-space operators, which solve point set visibility, compute a per-pixel view-dependent weight, and ensure a smooth weighting function over each input image. The proposed approach works independently on each image in a memory coherent manner, and can be easily extended to include further image quality estimators. The effectiveness of the method is demonstrated on a series of massive real-world point datasets.},
  pages = {25--32},
  issn = {1017-4656},
  url = {https://www.crs4.it/vic/data/papers/egareasch2011-streaming-photo-blending.pdf},
}

@Article{Schmid:2011:GFP,
  idxproject = {3DANATOMICALHUMAN},
  author = {J{\'e}r{\^o}me Schmid and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Enrico Gobbetti and Nadia Magnenat-Thalmann},
  title = {A GPU framework for parallel segmentation of volumetric images using discrete deformable models},
  journal = {The Visual Computer},
  volume = {27},
  number = {2},
  pages = {85--95},
  year = 2011,
  abstract = {Despite the ability of current GPU processors to handle heavy parallel computation tasks, their use for solving medical image segmentation problems is still not fully exploited and remains challenging. Many difficulties may arise, related for example to the different image modalities, the noise and artifacts of source images, or the shape and appearance variability of the structures to segment. Motivated by practical problems of image segmentation in the medical field, we present in this paper a GPU framework based on explicit discrete deformable models, implemented over the NVidia CUDA architecture, aimed at the segmentation of volumetric images. The framework supports the segmentation in parallel of different volumetric structures as well as interaction during the segmentation process and real-time visualization of the intermediate results. Promising results in terms of accuracy and speed on a real segmentation experiment have demonstrated the usability of the system.},
  url = {https://www.crs4.it/vic/data/papers/tvc2011-gpu-segmentation.pdf},
  doi = {10.1007/s00371-010-0532-0}
}

@PhdThesis{Iglesias:2011:RGO,
  author = {{Jos{\'e} Antonio} {Iglesias Guiti{\'a}n}},
  title = {Real-time GPU-accelerated Out-of-core Rendering and Light-field Visualization for Improved Massive Volumes Understanding},
  school = {Dept. of Electrical Engineering, University of Cagliari, Italy},
  year = 2011,
  url = {https://www.crs4.it/vic/data/papers/2011-phd-iglesias-volumes.pdf},
  abstract = {Volumetric datasets are growing at incredible rates in terms of both number and size, resulting in two visualization challenges: maintaining performance and extracting meaningful information. These two challenges are closely related, since user interaction, which imposes real-time constraints, is key to volumetric understanding. In this thesis, we introduce scalable methods for rendering volumes of potentially unlimited size on modern GPU architectures. Furthermore, we present methods to improve their understanding through illustrative techniques and presentation on advanced 3D displays.}
}

@MastersThesis{Tinti:2011:PSL,
  author = {Alex Tinti},
  title = {A point-based system for local and remote exploration of dense 3D scanned models},
  school = {Dept.
of Mathematics and Informatics, University of Cagliari, Italy},
  year = {2011},
  url = {https://www.crs4.it/vic/data/papers/2011-msc-tinti-nembo.pdf},
  abstract = {Today's 3D laser scanning technologies and digital photography techniques make it easy to acquire multi-million-point models at sub-millimetric detail, providing a high-quality digital representation of the object of interest. Unfortunately, standard rendering algorithms cannot deal at run time with such a huge amount of data on commodity graphics platforms, and often just a coarse-grained low-resolution model can be shown to the end user. This thesis focuses on a client-server system for real-time point-based rendering and network distribution of large 3D models on low-end platforms. The rendering strategy is based on a hierarchical multi-resolution structure, a BSP tree, composed of compressed nodes with thousands of point samples each. Nodes are built in a two-phase process: the first phase extracts leaf nodes from the raw input dataset; the second, starting from the leaves, builds inner nodes by merging and filtering pairs of children to obtain the parent node, until the unique root is constructed. The current view-dependent representation of the multi-resolution model is incrementally updated at run time by an adaptive refinement process that fetches data from the local or remote out-of-core multi-resolution structure. Vertex and fragment shaders are used to render the GPU-cached model representation with high-quality elliptical sample drawing and several other shading effects. A single-touch user interface, which allows end users to inspect the model, includes a bidirectional hyperlink system for accessing remote multimedia contents, connecting different parts of the 3D model to several information sources. Area, distance, and angle measurement tools have also been implemented, as well as more common tools for model, camera, and light control. The system can provide effortless exploration of highly detailed 3D models, from small artifacts to larger sites, and can be distributed as a web plugin, a stand-alone application, or even a museum kiosk installation, to be placed next to the real 3D artwork in order to improve the visitor's experience.}
}

%################################
%### 2010
%################################

@InCollection{Pintus:2010:PR3,
  author = {Ruggero Pintus and Thomas Malzbender and Oliver Wang and Ruth Bergman and Hila Nachlieli and Gitit Ruckenstein},
  affiliation = {CRS4 (Center for Advanced Studies, Research and Development in Sardinia), Parco Scientifico e Tecnologico, POLARIS, Edificio 1, 09010 Pula, CA Italy},
  title = {Photo Repair and {3D} Structure from Flatbed Scanners Using 4- and 2-Source Photometric Stereo},
  booktitle = {Computer Vision, Imaging and Computer Graphics. Theory and Applications},
  series = {Communications in Computer and Information Science},
  editor = {AlpeshKumar Ranchordas and {Joao Madeiras} Pereira and {Helder J.} Araujo and {Joao Manuel R. S.} Tavares},
  publisher = {Springer Berlin Heidelberg},
  isbn = {978-3-642-11840-1},
  keyword = {Computer Science},
  pages = {326--342},
  volume = {68},
  year = {2010},
  abstract = {We recently introduced a technique that allows 3D information to be captured from a conventional flatbed scanner. The technique requires no hardware modification and allows untrained users to easily capture 3D datasets. Once captured, these datasets can be used for interactive relighting and enhancement of surface detail on physical objects.
We have also found that the method can be used to scan and repair damaged photographs. Since the only 3D structure on these photographs will typically be surface tears and creases, our method provides an accurate procedure for automatically detecting these flaws without any user intervention. Once detected, automatic techniques, such as infilling and texture synthesis, can be leveraged to seamlessly repair such damaged areas. Here we provide a more thorough exposition and significant new material. We first present a method that is able to repair damaged photographs with minimal user interaction and then show how we can achieve similar results using a fully automatic process. }, thumbnail = {https://www.crs4.it/vic/data/papers/visapp-2009-photo.jpg}, doi = {10.1007/978-3-642-11840-1_24}, }
@InProceedings{Pintore:2010:AMM, idxkey = {}, idxproject = {SBLGSM-IRAD}, title = {An Application of Multiresolution Massive Surface Representations to the Simulation of Asteroid Missions}, author = {Giovanni Pintore and Enrico Gobbetti and Fabio Marton and Russell Turner and Roberto Combet}, abstract = { We report on a real-time application supporting fast, realistic rendering of asteroid datasets, as well as collision detection and response between the asteroid and prototype robotic surface exploration vehicles. The system organizes the asteroid surface into a two-level multiresolution structure, which embeds a fine-grained per-patch spatial index within a coarse-grained patch-based structure. The coarse-grained structure, maintained out-of-core, is used for fast batched I/O and GPU accelerated rendering, while the per-patch fine-grained structure is used to accelerate raycasting and collision queries. The resulting system has been tested with a simple robot lander and surface exploration simulator. The system models gravity using mass particles uniformly distributed within the asteroid bodies. Real-time performance is achieved on a commodity platform with giga-triangle representations of asteroids 25143 Itokawa and 433 Eros. }, month = nov, year = 2010, pages = {9--16}, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Genoa, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2010-asteroid.pdf}, }
@InProceedings{Marras:2010:TEG, idxkey = {}, idxproject = {DISTRICT-LAB3D}, title = {Two examples of GPGPU acceleration of memory-intensive algorithms}, author = {Stefano Marras and Claudio Mura and Enrico Gobbetti and Riccardo Scateni and Roberto Scopigno}, abstract = { The advent of GPGPU technologies has allowed for considerable speed-ups in many high-dimension, memory-intensive computational problems. In this paper we demonstrate the effectiveness of such techniques by describing two applications of GPGPU computing to two different subfields of computer graphics, namely computer vision and mesh processing. In the first case, CUDA technology is employed to accelerate the computation of an approximation of the motion between two images, also known as optical flow. As for mesh processing, we exploit the massively parallel architecture of CUDA devices to accelerate the face clustering procedure that is employed in many recent mesh segmentation algorithms. In both cases, the results obtained so far are presented and thoroughly discussed, along with the expected future development of the work. }, month = nov, year = 2010, pages = {49--56}, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Genoa, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2010-gpgpu.pdf}, }
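%% Illustrative sketch for Marras:2010:TEG above (hypothetical code, not from
%% the paper): the simplest form of the optical-flow kernel it accelerates is
%% exhaustive per-pixel block matching, an embarrassingly parallel computation
%% that maps naturally to CUDA. Python/NumPy stand-in, one GPU thread's work
%% per (y, x):
%%
%%   import numpy as np
%%
%%   def block_match_flow(img0, img1, patch=4, search=8):
%%       """Dense motion between two float32 grayscale images via SSD block
%%       matching; every (y, x) is independent, hence GPU-friendly."""
%%       h, w = img0.shape
%%       flow = np.zeros((h, w, 2), np.int32)
%%       lo = patch + search
%%       for y in range(lo, h - lo):
%%           for x in range(lo, w - lo):
%%               ref = img0[y-patch:y+patch+1, x-patch:x+patch+1]
%%               best, cost = (0, 0), np.inf
%%               for dy in range(-search, search + 1):
%%                   for dx in range(-search, search + 1):
%%                       cand = img1[y+dy-patch:y+dy+patch+1,
%%                                   x+dx-patch:x+dx+patch+1]
%%                       c = float(((ref - cand) ** 2).sum())  # SSD cost
%%                       if c < cost:
%%                           cost, best = c, (dx, dy)
%%               flow[y, x] = best
%%       return flow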
@InProceedings{Giachetti:2010:EAE, idxkey = {}, idxproject = {3DANATOMICALHUMAN}, title = {Edge adaptive and energy preserving volume upscaling for high quality volume rendering}, author = {Andrea Giachetti and {Jos{\'e} Antonio} {Iglesias Guiti{\'a}n} and Enrico Gobbetti}, abstract = { We describe an edge-directed optimization-based method for volumetric data supersampling. The method is based on voxel splitting and iterative refinement performed with a greedy optimization driven by the smoothing of second order gray level derivatives and the assumption that the average gray level in the original voxels region cannot change. Due to these assumptions, the method, which is the 3D extension of a recently proposed technique, is particularly suitable for upscaling medical imaging data, creating physically reasonable voxel values and overcoming the so-called partial volume effect. The good quality of the results obtained is demonstrated through experimental tests. Furthermore, we show how offline 3D upscaling of volumes can be coupled with recent techniques to perform high quality volume rendering of large datasets, enabling better inspection of medical volumetric data. }, month = nov, year = 2010, pages = {17--23}, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Genoa, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2010-upscaling.pdf}, }
@InProceedings{Goswami:2010:HQI, idxproject = {}, author = {Prashant Goswami and Yanci Zhang and Renato Pajarola and Enrico Gobbetti}, title = {High Quality Interactive Rendering of Massive Point Models using Multi-way kd-Trees}, booktitle = {18th Pacific Conference on Computer Graphics and Applications (PG)}, year = 2010, abstract = { We present a simple and efficient technique for out-of-core multi-resolution construction and high quality visualization of large point datasets. The method introduces a novel hierarchical LOD data organization based on multi-way kd-trees that simplifies memory management and allows controlling the LOD tree's height. The technique is incorporated in a full end-to-end system, which is evaluated on complex models made of hundreds of millions of points. }, pages = {93--100}, url = {https://www.crs4.it/vic/data/papers/pg2010-multi-way-kdtrees.pdf}, doi = {10.1109/PacificGraphics.2010.20}, isbn = {978-1-4244-8288-7}, }
@InProceedings{Agus:2010:SVS, idxproject = {3DANATOMICALHUMAN}, author = {Marco Agus and Enrico Gobbetti and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Fabio Marton}, title = {Split-Voxel: A Simple Discontinuity-Preserving Voxel Representation for Volume Rendering}, booktitle = {Proc. Volume Graphics}, year = 2010, pages = {21--28}, abstract = { The most common representation of volumetric models is a regular grid of cubical voxels with one value each, from which a smooth scalar field is reconstructed. However, common real-world situations include cases in which volumes represent physical objects with well defined boundaries separating different materials, giving rise to models with quasi-impulsive gradient fields.
In our split-voxel representation, we replace blocks of $N^3$ voxels by one single voxel that is split by a feature plane into two regions with constant values. This representation has little overhead over storing precomputed gradients, and has the advantage that feature planes provide minimal geometric information about the underlying volume regions that can be effectively exploited for volume rendering. We show how to convert a standard mono-resolution representation into an out-of-core multiresolution structure, both for labeled and continuous scalar volumes. We also show how to interactively explore the models using a multiresolution GPU ray-casting framework. The technique supports real-time transfer function manipulation and proves particularly useful for fast multiresolution rendering, since accurate silhouettes are preserved even at very coarse levels of detail. }, url = {https://www.crs4.it/vic/data/papers/vg2010-split-voxel.pdf}, }
@InProceedings{Agus:2010:ELD, idxproject = {3DANATOMICALHUMAN}, author = {Marco Agus and Enrico Gobbetti and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Fabio Marton}, title = {Evaluating layout discrimination capabilities of continuous and discrete automultiscopic displays}, booktitle = {Proc. Fourth International Symposium on 3D Data Processing, Visualization and Transmission}, year = 2010, note = {Paper 100, Electronic proceedings}, abstract = { Continuous automultiscopic displays represent a promising technology, able to provide users with truly involving and compelling experiences. In this paper, we report on perceptual experiments carried out to evaluate the depth discrimination capabilities of this technology with respect to two-view (stereo) and discrete multi-view designs. The evaluation employed a large scale multi-projector 3D display offering continuous horizontal parallax in a room size workspace. Two tests were considered in the context of a depth-oblivious rendering technique: a layout discrimination task, and a path tracing task. Our results confirm that continuous multiview technology is able to elicit depth cues more efficiently than standard stereo systems, providing clear advantages in typical analysis tasks such as the understanding of network structures. Furthermore, our results indicate that depth perception capabilities are closely related to the number of views provided by multiview systems. }, url = {https://www.crs4.it/vic/data/papers/3dpvt2010-evaluation.pdf}, }
@Article{Pintus:2010:SER, idxproject = {DISTRICT-LAB3D}, author = {Ruggero Pintus and Enrico Gobbetti and Paolo Cignoni and Roberto Scopigno}, title = {Shape Enhancement for Rapid Prototyping}, journal = {The Visual Computer}, volume = {26}, number = {6--8}, pages = {831-840}, year = 2010, abstract = { Many applications, for instance in the reverse engineering and cultural heritage fields, require building physical replicas of 3D digital models. Recent 3D printers can easily perform this task in a relatively short time and using color to reproduce object textures. However, the finite resolution of printers and, above all, some peculiar optical and physical properties of the materials used reduce their perceptual quality. The contribution of this paper is a shape enhancing technique, which allows users to increase the readability of the tiniest details in physical replicas, without requiring manual post-reproduction interventions. }, url = {https://www.crs4.it/vic/data/papers/cgi2010-shape-enhancement.pdf}, }
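%% Illustrative sketch for Pintus:2010:SER above (hypothetical code, and a
%% deliberately simplified stand-in, not the paper's algorithm): one classical
%% way to increase the readability of fine detail before reproduction is
%% unsharp masking of the surface signal, exaggerating the high-frequency
%% residual that finite printer resolution and material properties would
%% otherwise wash out. Assumes SciPy is available:
%%
%%   import numpy as np
%%   from scipy.ndimage import gaussian_filter
%%
%%   def unsharp_enhance(height, sigma=2.0, k=1.5):
%%       """Split a height/gray field into base + detail, boost the detail."""
%%       base = gaussian_filter(height, sigma)   # low-frequency component
%%       detail = height - base                  # fine features to preserve
%%       return base + (1.0 + k) * detail        # k > 0 exaggerates detail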
@Article{Iglesias:2010:VDE, idxkey = {TOP-THEME-LIGHT-FIELD,Surgical,VOLREN}, idxproject = {3DANATOMICALHUMAN}, author = {Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Enrico Gobbetti and Fabio Marton}, title = {View-dependent Exploration of Massive Volumetric Models on Large Scale Light Field Displays}, journal = {The Visual Computer}, volume = {26}, number = {6--8}, pages = {1037--1047}, year = 2010, abstract = { We report on a light-field display based virtual environment enabling multiple naked-eye users to perceive detailed multi-gigavoxel volumetric models as floating in space, responsive to their actions, and delivering different information in different areas of the workspace. Our contributions include a set of specialized interactive illustrative techniques able to provide different contextual information in different areas of the display, as well as an out-of-core CUDA based raycasting engine with a number of improvements over current GPU volume raycasters. The possibilities of the system are demonstrated by the multi-user interactive exploration of 64GVoxels datasets on a 35MPixel light field display driven by a cluster of PCs. }, url = {https://www.crs4.it/vic/data/papers/cgi2010-view-dependent.pdf}, }
@InProceedings{Schmid:2010:GFP, idxproject = {3DANATOMICALHUMAN}, author = {J{\'e}r{\^o}me Schmid and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Enrico Gobbetti and Nadia Magnenat-Thalmann}, title = {A GPU framework for parallel segmentation of volumetric images using discrete deformable models}, booktitle = {Proc. 3DAnatomicalHuman Summer School}, year = 2010, abstract = { Although research in image segmentation has been very active during the last decades, it is still a very challenging problem. A lot of difficulties may arise related to, for example, the different image modalities, noise and artifacts of source images, or the shape and appearance variability of the structures to segment. Motivated by problems of image segmentation in the medical field, we present in this paper a GPU framework based on explicit discrete deformable models, implemented over the NVidia CUDA architecture, aimed at the segmentation of volumetric images. The framework supports the parallel segmentation of different volumetric structures as well as interaction during the segmentation process and real-time visualization of the intermediate results. Promising results in terms of accuracy and speed on a real segmentation experiment have demonstrated the usability of the system. }, url = {https://www.crs4.it/vic/data/papers/3dah2010-extended-abstract-gpu-segmentation.pdf}, }
@InProceedings{Giachetti:2010:EPU, idxproject = {3DANATOMICALHUMAN}, author = {Andrea Giachetti and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Enrico Gobbetti}, title = {An Energy Preserving Upscaling Technique for Enhanced Volume Rendering of Medical Data}, booktitle = {Proc. 3DAnatomicalHuman Summer School}, year = 2010, abstract = { In this paper we describe an edge-directed optimization-based method for volumetric data supersampling. Our method addresses the partial volume effect by upscaling the volumetric data, subdividing voxels into smaller parts and performing an optimization step that keeps the energy of each original subdivided voxel constant while enhancing edge continuity. Experimental tests show the good quality of the results obtained with our approach. Furthermore, we show how offline 3D upscaling of volumes can be coupled with recent techniques to perform high quality volume rendering of large datasets, enabling better inspection of medical volumetric data. }, url = {https://www.crs4.it/vic/data/papers/3dah2010-extended-abstract-upscaling.pdf}, }
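%% Illustrative sketch for Giachetti:2010:EPU above (hypothetical code; the
%% paper's greedy optimization smoothing second-order derivatives is replaced
%% here by a plain Gaussian for brevity). The key constraint is that the mean
%% of the eight children of each voxel stays equal to the original value, so
%% the volume's "energy" is preserved. Assumes SciPy is available:
%%
%%   import numpy as np
%%   from scipy.ndimage import gaussian_filter
%%
%%   def energy_preserving_upscale(vol, iters=10):
%%       a, b, c = vol.shape
%%       up = vol.astype(np.float64).repeat(2, 0).repeat(2, 1).repeat(2, 2)
%%       for _ in range(iters):
%%           up = gaussian_filter(up, 1.0)        # smoothing step
%%           blocks = up.reshape(a, 2, b, 2, c, 2)
%%           mean = blocks.mean(axis=(1, 3, 5))   # current 2x2x2 block means
%%           blocks += (vol - mean)[:, None, :, None, :, None]  # re-impose
%%       return up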
@InProceedings{Ciuciu:2010:CSC, idxproject = {3DANATOMICALHUMAN}, author = {Ioana Ciuciu and Han Kang and Robert Meersman and J{\'e}r{\^o}me Schmid and Nadia Magnenat-Thalmann and Jos{\'e} Antonio {Iglesias Guiti{\'a}n} and Enrico Gobbetti}, title = {Collaborative Semantic Content Management: an Ongoing Case Study for Imaging Applications}, booktitle = {Proc. 11th European Conference on Knowledge Management}, address = {Conference held in Famalicao, Portugal}, year = 2010, month = sep, pages = {257--267}, abstract = { This paper presents a collaborative solution for knowledge management, implemented as a semantic content management system (CMS) with the purpose of knowledge sharing between users with different backgrounds. The CMS is enriched with semantic annotations, enabling content to be categorized, retrieved and published on the Web thanks to the Linked Open Data (LOD) principle, which enables the linking of data inside existing resources using a standardized URI mechanism. Annotations are done collaboratively as a social process. Users with different backgrounds express their knowledge using structured natural language. The user knowledge is captured through an ontological approach and can be further transformed into RDF(S) classes and properties. Ontologies are at the heart of our CMS and they naturally co-evolve with their communities of use to provide a new way of knowledge sharing inside the network. The ontology is modeled following the so-called DOGMA (Developing Ontology-Grounded Methods and Applications) paradigm, grounded in natural language. The approach will be demonstrated on a use case concerning the semantic annotation of anatomical data (e.g. medical images). }, url = {https://www.crs4.it/vic/data/papers/eckm2010-csc.pdf}, }
%################################
%### 2009
%################################
@TechReport{Gobbetti:2009:DIL, idxproject = {DISTRICT-LAB3D}, author = {Enrico Gobbetti and Fabio Bettio and Fabio Marton and Marco Agus and Ruggero Pintus and Gianmauro Cuccuru and Roberto Combet and Emilio Merella and Alex Tinti}, title = {{Distretto ICT LAB3D}: Laboratorio di Acquisizione, Distribuzione e Visualizzazione di Modelli {3D} Complessi}, institution = {CRS4}, year = 2009, note = {In Italian}, abstract = { This report motivates and discusses the first three years of activity of the Laboratory for the Acquisition, Distribution, and Visualization of Complex 3D Models of the ICT District of the Sardinia Region. }, url = {https://www.crs4.it/vic/data/papers/districtlab3d2009-report.pdf}, }
@InCollection{Luo:2009:CPF, idxkey = {Surgical,VOLREN}, idxproject = {3DANATOMICALHUMAN}, author = {Yanlin Luo and Jos\'e Antonio {Iglesias Guiti\'an} and Enrico Gobbetti and Fabio Marton}, title = {Context Preserving Focal Probes for Exploration of Volumetric Medical Datasets}, booktitle = {Modelling the Physiological Human, 3D Physiological Human Workshop}, editor = {Nadia Magnenat-Thalmann}, series = ser-LNCS, publisher = pub-SV, volume = 5903, year = 2009, pages = {187--198}, abstract = { During real-time medical data exploration using volume rendering, it is often difficult to enhance a particular region of interest without losing context information. In this paper, we present a new illustrative technique for focusing on a user-driven region of interest while preserving context information. Our focal probes define a region of interest using a distance function that controls the opacity of the voxels within the probe, exploit silhouette enhancement, and use non-photorealistic shading techniques to improve shape depiction. }, url = {https://www.crs4.it/vic/data/papers/3dph2009-illustrative.pdf} }
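%% Illustrative sketch for Luo:2009:CPF above (hypothetical code, not the
%% paper's exact operator): the core idea is that a distance function from the
%% probe center modulates per-voxel opacity, keeping the focus fully opaque
%% while fading, but not erasing, the surrounding context:
%%
%%   import numpy as np
%%
%%   def probe_opacity(alpha, dist, radius, falloff=0.5, context=0.1):
%%       """alpha: transfer-function opacities; dist: voxel distances from the
%%       probe center. Inside the probe opacity is unchanged; outside it fades
%%       toward the residual `context` level, preserving context cues."""
%%       t = np.clip((dist - radius) / (falloff * radius), 0.0, 1.0)
%%       return alpha * (1.0 - (1.0 - context) * t)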
@Booklet{Yoon:2009:IMM, idxproject = {V-CITY,BOEING777}, title = {Interactive Massive Model Rendering}, author = {{Sung-eui} Yoon and Dinesh Manocha and David Kasik and Enrico Gobbetti and Renato Pajarola and Philipp Slusallek}, howpublished = {IEEE VisWeek 2009 Course Notes}, address = {Atlantic City, NJ, USA}, month = oct, year = 2009, thumbnail = {https://www.crs4.it/vic/data/papers/visweek2009.jpg}, url = {}, }
@InProceedings{DiBenedetto:2009:IRE, idxproject = {V-CITY}, author = {Marco {Di Benedetto} and Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Roberto Scopigno}, title = {Interactive Remote Exploration of Massive Cityscapes}, booktitle = {The 10th International Symposium on Virtual Reality, Archaeology and Cultural Heritage}, year = 2009, month = Oct, pages = {9--16}, abstract = { We focus on developing a simple and efficient unified level-of-detail structure for networked urban model viewers. At the core of our approach is a revisitation of the BlockMap data structure, originally introduced for encoding coarse representations of blocks of buildings to be used as direction-independent impostors when rendering far-away city blocks. The contribution of this paper is manifold: we extend the BlockMap representation to support sloped surfaces and input-sensitive sampling of color; we introduce a novel sampling strategy for building accurate BlockMaps; we show that BlockMaps can be used as a versatile and robust way to parameterize the visible surface of a highly complex model; we improve the expressiveness of urban model rendering by integrating an ambient occlusion term in the representation and describe an efficient method for computing it; we illustrate the design and implementation of an urban model streaming and visualization system and demonstrate its efficiency when browsing large city models in a limited bandwidth setting. }, url = {https://www.crs4.it/vic/data/papers/vast2009-blockmaps.pdf}, }
@InProceedings{Bettio:2009:PSL, idxproject = {DISTRICT-LAB3D}, author = {Fabio Bettio and Enrico Gobbetti and Fabio Marton and Alex Tinti and Emilio Merella and Roberto Combet}, title = {A point-based system for local and remote exploration of dense {3D} scanned models}, booktitle = {The 10th International Symposium on Virtual Reality, Archaeology and Cultural Heritage}, year = 2009, month = Oct, pages = {25-32}, abstract = { We present a client-server framework for network distribution and real-time point-based rendering of large 3D models on commodity graphics platforms. Model inspection, based on a one-touch interface, is enriched by a bidirectional hyperlink system which provides access to multiple layers of multimedia contents, linking different parts of the 3D model to many information sources. In addition to view and light control, users can perform simple 3D operations like angle, distance and area measurements on the 3D model. An authoring tool derived from the basic client allows users to add multimedia content to the model description.
Our rendering method is based on a coarse-grained multiresolution structure, where each node contains thousands of point samples. At runtime, a view-dependent refinement process incrementally updates the current GPU-cached model representation from local or remote out-of-core data. Vertex and fragment shaders are used for high quality elliptical sample drawing and a variety of shading effects. The system is demonstrated with examples that range from documentation and inspection of small artifacts to exploration of large sites, in both a museum and a large scale distribution setting. }, url = {https://www.crs4.it/vic/data/papers/vast2009-nembo.pdf}, }
@InProceedings{Cuccuru:2009:FLM, idxproject = {DISTRICT-LAB3D}, author = {Gianmauro Cuccuru and Enrico Gobbetti and Fabio Marton and Renato Pajarola and Ruggero Pintus}, title = {Fast low-memory streaming MLS reconstruction of point-sampled surfaces}, booktitle = {Graphics Interface}, year = 2009, month = May, pages = {15--22}, abstract = { We present a simple and efficient method for reconstructing triangulated surfaces from massive oriented point sample datasets. The method combines streaming and parallelization, moving least-squares (MLS) projection, adaptive space subdivision, and regularized isosurface extraction. Besides presenting the overall design and evaluation of the system, our contributions include methods for keeping the complexity of in-core data structures purely locally output-sensitive and for exploiting both the explicit and implicit data produced by an MLS projector to produce tightly fitting regularized triangulations using a primal isosurface extractor. Our results show that the system is fast, scalable, and accurate. We are able to process models with several hundred million points in about an hour and outperform current fast streaming reconstructors in terms of geometric accuracy. }, url = {https://www.crs4.it/vic/data/papers/gi09-streamls.pdf} }
@Booklet{Magnenat:2009:3AM, idxproject = {3DANATOMICALHUMAN}, title = {3D Anatomical Modelling and Simulation Concepts}, author = {Nadia Magnenat-Thalmann and J{\'e}r{\^o}me Schmid and Herv{\'e} Delingette and Marco Agus and Jos{\'e} Antonio Iglesias Guiti{\'a}n}, howpublished = {Eurographics 2009 Course Notes}, address = {Eurographics 2009, Munich}, month = aug, year = 2009, thumbnail = {https://www.crs4.it/vic/data/papers/eg2007.jpg}, url = {}, }
@Article{Agus:2009:I3M, idxkey = {TOP-THEME-SURGICAL,MR,VOLREN}, idxproject = {3DANATOMICALHUMAN,CYBERSAR}, author = {Marco Agus and Fabio Bettio and Andrea Giachetti and Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Fabio Marton and Jonas Nilsson and Giovanni Pintore}, title = {An interactive {3D} medical visualization system based on a light field display}, journal = {The Visual Computer}, year = 2009, volume = 25, number = 9, pages = {883--893}, abstract = { We present a prototype medical data visualization system exploiting a light field display and custom direct volume rendering techniques to enhance understanding of massive volumetric data, such as CT, MRI, and PET scans. The system can be integrated with standard medical image archives and extends the capabilities of current radiology workstations by supporting real-time rendering of volumes of potentially unlimited size on light field displays generating dynamic observer-independent light fields.
The system allows multiple untracked naked-eye users in a sufficiently large interaction area to coherently perceive rendered volumes as real objects, with stereo and motion parallax cues. In this way, an effective collaborative analysis of volumetric data can be achieved. Evaluation tests demonstrate the usefulness of the generated depth cues and the improved performance in understanding complex spatial structures with respect to standard techniques. }, url = {https://www.crs4.it/vic/data/papers/tvc2009-holo3dph.pdf}, doi = {10.1007/s00371-009-0311-y} }
@InProceedings{Agus:2009:AMM, idxproject = {CYBERSAR}, author = {Marco Agus and Fabio Bettio and Enrico Gobbetti and Fabio Marton and Antonio Zorcolo}, title = {Advances in massive model visualization in the CYBERSAR project}, booktitle = {Final workshop of GRID projects, PON RICERCA 2000-2006, AVVISO 1575}, year = 2009, month = Feb, pages = {}, abstract = { We provide a survey of the major results obtained within the CYBERSAR project in the area of massive data visualization. Despite the impressive improvements in graphics and computational hardware performance, interactive visualization of massive models still remains a challenging problem. To address this problem, we developed methods that exploit the programmability of latest generation graphics hardware, and combine coarse-grained multiresolution models, chunk-based data management with compression, incremental view-dependent level-of-detail selection, and visibility culling. The models that can be interactively rendered with our methods range from multi-gigabyte-sized datasets for general 3D meshes or scalar volumes, to terabyte-sized datasets in the restricted 2.5D case of digital terrain models. Such performance enables novel ways of exploring massive datasets. In particular, we have demonstrated the capability of driving innovative light field displays capable of giving multiple freely moving naked-eye viewers the illusion of seeing and manipulating massive 3D objects with continuous viewer-independent parallax. }, thumbnail = {https://www.crs4.it/vic/img/thumbnail/th_cybersar.jpg}, url = {https://www.crs4.it/vic/data/papers/cybersar2009-massive.pdf} }
@InProceedings{Agus:2009:RRR, idxproject = {CYBERSAR}, author = {Marco Agus and Fabio Bettio and Enrico Gobbetti and Fabio Marton and Antonio Zorcolo}, title = {Recent results in rendering massive models on horizontal parallax-only light field displays}, booktitle = {Final workshop of GRID projects, PON RICERCA 2000-2006, AVVISO 1575}, year = 2009, month = Feb, pages = {}, abstract = { In this contribution, we report on specialized out-of-core multiresolution real-time rendering systems able to render massive surface and volume models on a special class of horizontal parallax-only light field displays. The displays are based on a specially arranged array of projectors emitting light beams onto a holographic screen, which then performs the necessary optical transformation to compose these beams into a continuous 3D view. The rendering methods employ state-of-the-art out-of-core multiresolution techniques able to correctly project geometries onto the display and to dynamically adapt model resolution by taking into account the particular spatial accuracy characteristics of the display. The programmability of latest generation graphics architectures is exploited to achieve interactive performance.
As a result, multiple freely moving naked-eye viewers can inspect and manipulate virtual 3D objects that appear to them floating at fixed physical locations. The approach provides rapid visual understanding of complex multi-gigabyte surface models and volumetric data sets. }, thumbnail = {https://www.crs4.it/vic/img/thumbnail/th_cybersar.jpg}, url = {https://www.crs4.it/vic/data/papers/cybersar2009-holo.pdf} } @InProceedings{Pintus:2009:PRS, idxkey = {}, idxproject = {}, author = {Ruggero Pintus and Thomas Malzbender and Oliver Wang and Ruth Bergman and Hila Nachlieli and Gitit Ruckenstein}, title = {Photo Repair and {3D} Structure from Flatbed Scanners}, booktitle = {VISAPP International Conference on Computer Vision Theory and Applications}, year = 2009, month = Feb, pages = {40--50}, abstract = { We introduce a technique that allows 3D information to be captured from a conventional flatbed scanner. The technique requires no hardware modification and allows untrained users to easily capture 3D datasets. Once captured, these datasets can be used for interactive relighting and enhancement of surface detail on physical objects. We have also found that the method can be used to scan and repair damaged photographs. Since the only 3D structure on these photographs will typically be surface tears and creases, our method provides an accurate procedure for automatically detecting these flaws without any user intervention. Once detected, automatic techniques, such as infilling and texture synthesis, can be leveraged to seamlessly repair such damaged areas. We first present a method that is able to repair damaged photographs with minimal user interaction and then show how we can achieve similar results using a fully automatic process. }, url = {https://www.crs4.it/vic/data/papers/visapp-2009-photo.pdf}, } %################################ %### 2008 %################################ @InProceedings{Agus:2008:IAE, idxkey = {Surgical,MR,VOLREN,HOLO}, idxproject = {3DANATOMICALHUMAN}, author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Jos\'e Antonio {Iglesias Guiti\'an} and Jonas Nilsson and Giovanni Pintore and Gianluigi Zanetti}, title = {Implementation and evaluation of an interactive volume visualization system on a lightfield display}, booktitle = {First 3D Physiological Human Workshop}, year = 2008, month = Dec, note = {Conference Abstract}, url = {https://www.crs4.it/vic/data/papers/3dph08-holo-summary.pdf} } @InProceedings{Pintus:2008:3SF, idxkey = {}, idxproject = {DISTRICT-LAB3D}, author = {Ruggero Pintus and Simona Podda and Massimo Vanzi}, title = {3D Sculptures From SEM Images}, booktitle = {EMC 2008 14th European Microscopy Congress 1-5 September 2008, Aachen, Germany}, year = 2008, pages = {597--598}, publisher = {Springer}, MASKEDurl = {https://www.crs4.it/vic/data/papers/FIXME}, } @Article{Pintus:2008:IAP, idxkey = {}, idxproject = {DISTRICT-LAB3D}, author = {Ruggero Pintus and Simona Podda and Massimo Vanzi}, title = {Improvements in automated photometric stereo {3D} SEM}, journal = {Microscopy and Microanalysis}, year = 2008, volume = 14, number = {Suppl. 
2}, pages = {608--609}, note = {Extended abstract of a paper presented at Microscopy and Microanalysis 2008 in Albuquerque, New Mexico, USA, August 3 -- August 7, 2008}, url = {https://www.crs4.it/vic/data/papers/mm-2008-improvements.pdf}, }
@Article{Mighela:2008:SRC, idxkey = {}, idxproject = {DISTRICT-LAB3D}, author = {Francesca Mighela and Cristian Perra and Ruggero Pintus and Simona Podda and Massimo Vanzi}, title = {{SEM} Remote Control with a {3D} Option}, journal = {Microscopy and Microanalysis}, year = 2008, volume = 14, number = {Suppl. 2}, pages = {892--893}, note = {Extended abstract of a paper presented at Microscopy and Microanalysis 2008 in Albuquerque, New Mexico, USA, August 3 -- August 7, 2008}, url = {https://www.crs4.it/vic/data/papers/mm-2008-sem-remote.pdf}, }
@InProceedings{Cignoni:2008:CER, idxkey = {}, idxproject = {DISTRICT-LAB3D}, author = {Paolo Cignoni and Enrico Gobbetti and Ruggero Pintus and Roberto Scopigno}, title = {Color Enhancement Techniques for Rapid Prototyping}, booktitle = {The 9th International Symposium on Virtual Reality, Archaeology and Cultural Heritage}, month = Dec, year = 2008, pages = {9-16}, abstract = { We propose to exploit the color capabilities of recent rapid prototyping hardware devices to enhance the visual appearance of reproduced objects. In particular, by carefully pre-computing surface shading, we are able to counterbalance the sub-surface scattering (SSS) effects that hinder the perception of fine surface details. As a practical result, we are able to reproduce small scale copies of cultural heritage artifacts with an increased readability of the tiniest features and particulars, without requiring manual post-reproduction interventions or hand painting. }, url = {https://www.crs4.it/vic/data/papers/vast2008-colorprint.pdf}, }
@InCollection{Guitian:2008:IUM, idxproject = {DISTRICT-LAB3D}, author = {{Jos\'e Antonio} {Iglesias Guiti\'an} and Marco Agus}, title = {Interfacce uomo-macchina nella Realt\`a Virtuale}, booktitle = {Human Computer Interaction -- fondamenti e prospettive}, editor = {Alessandro Soro}, publisher = {Polimetrica}, month = {}, year = 2008, pages = {289-330}, url = {https://www.crs4.it/vic/data/papers/hcim2008-interfacce.pdf}, abstract = { This chapter describes the main elements that influence human-machine interaction in virtual reality, as they are currently configured and as they are expected to develop in the near future. }, note = {In Italian} }
@Book{Yoon:2008:RTM, idxkey = {TOP-THEME-MASSIVE-MODELS}, author = {{Sung-eui} Yoon and Enrico Gobbetti and David Kasik and Dinesh Manocha}, title = {Real-time Massive Model Rendering}, publisher = {Morgan and Claypool}, year = 2008, volume = 2, number = 1, series = {Synthesis Lectures on Computer Graphics and Animation}, month = Aug, abstract = { Interactive display and visualization of large geometric and textured models is becoming a fundamental capability. There are numerous application areas, including games, movies, CAD, virtual prototyping, and scientific visualization. One observation about geometric models used in interactive applications is that their model complexity continues to increase because of fundamental advances in 3D modeling, simulation, and data capture technologies. As computing power increases, users take advantage of the algorithmic advances and generate even more complex models and data sets.
Therefore, there are many cases where we are required to visualize massive models that consist of hundreds of millions of triangles and even billions of triangles. However, interactive visualization and handling of such massive models still remains a challenge in computer graphics and visualization. In this monograph we discuss various techniques that enable interactive visualization of massive models. These techniques include visibility computation, simplification, levels-of-detail, and cache-coherent data management. We believe that the combination of these techniques can make it possible to interactively visualize massive models on commodity hardware. }, url = {https://www.crs4.it/vic/data/papers/synlec2008-rtmmr-monograph.pdf}, doi = {10.2200/S00131ED1V01Y200807CGR007}, }
@InProceedings{Agus:2008:AVD, idxkey = {Surgical,MR,VOLREN,HOLO}, idxproject = {3DANATOMICALHUMAN,CYBERSAR}, title = {Towards advanced volumetric display of the human musculoskeletal system}, author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Fabio Marton}, abstract = { We report on our research results on effective volume visualization techniques for medical and anatomical data. Our volume rendering approach employs GPU accelerated out-of-core direct rendering algorithms to fully support high-resolution, 16-bit raw medical datasets as well as segmentation. Images can be presented on a special light field display based on projection technology. Human anatomical data appear to moving viewers as floating in the light field display space and can be interactively manipulated. }, month = jun, year = 2008, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Salerno, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2008-movr.pdf}, }
@Article{Gobbetti:2008:SGR, idxkey = {MR,VOLREN,TOP-THEME-VOLUMETRIC}, idxproject = {3DANATOMICALHUMAN,CYBERSAR}, author = {Enrico Gobbetti and Fabio Marton and {Jos\'e Antonio} {Iglesias Guiti\'an}}, title = {A single-pass {GPU} ray casting framework for interactive out-of-core rendering of massive volumetric datasets}, journal = {The Visual Computer}, year = 2008, volume = 24, number = {7-9}, pages = {797--806}, abstract = { We present an adaptive out-of-core technique for rendering massive scalar volumes employing single pass GPU raycasting. The method is based on the decomposition of a volumetric dataset into small cubical bricks, which are then organized into an octree structure maintained out-of-core. The octree contains the original data at the leaves, and a filtered representation of children at inner nodes. At runtime an adaptive loader, executing on the CPU, updates a view- and transfer function-dependent working set of bricks maintained on GPU memory by asynchronously fetching data from the out-of-core octree representation. At each frame, a compact indexing structure, which spatially organizes the current working set into an octree hierarchy, is encoded in a small texture. This data structure is then exploited by an efficient stackless raycasting algorithm, which computes the volume rendering integral by visiting non-empty bricks in front-to-back order and adapting sampling density to brick resolution. Block visibility information is fed back to the loader to avoid refinement and data loading of occluded zones. The resulting method is able to interactively explore multi-giga-voxel datasets on a desktop PC. }, note = {Proc. CGI 2008}, url = {https://www.crs4.it/vic/data/papers/cgi2008-movr.pdf} }
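%% Illustrative sketch for Gobbetti:2008:SGR above (hypothetical CPU-side
%% code; the real traversal is a stackless GPU octree walk): per ray, the
%% renderer visits non-empty bricks front to back, composites, and stops as
%% soon as the ray is saturated (early ray termination):
%%
%%   def raycast_bricks(bricks, tau=0.99):
%%       """bricks: front-to-back ordered list; each item is None (empty
%%       brick, skipped) or a (colors, alphas) pair of per-sample arrays."""
%%       color, alpha = 0.0, 0.0
%%       for brick in bricks:
%%           if brick is None:
%%               continue                          # empty-space skipping
%%           for c, a in zip(*brick):
%%               color += (1.0 - alpha) * a * c    # front-to-back compositing
%%               alpha += (1.0 - alpha) * a
%%               if alpha > tau:
%%                   return color, alpha           # early ray termination
%%       return color, alpha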
@InProceedings{Gobbetti:2008:TSM, idxkey = {MR}, idxproject = {CYBERSAR,BOEING777}, author = {Enrico Gobbetti and Dave Kasik and {Sung-eui} Yoon}, title = {Technical Strategies for Massive Model Visualization}, booktitle = {Proc. ACM Solid and Physical Modeling Symposium}, publisher = pub-ACM, address = pub-ACM:adr, year = 2008, pages = {405-415}, abstract = { Interactive visualization of massive models still remains a challenging problem. This is mainly due to a combination of ever-increasing model complexity with the current hardware design trend that leads to a widening gap between slow data access speed and fast data processing speed. We argue that developing efficient data access and data management techniques is key in solving the problem of interactive visualization of massive models. Particularly, we discuss visibility culling, simplification, cache-coherent layouts, and data compression techniques as efficient data management techniques that enable interactive visualization of massive models. }, url = {https://www.crs4.it/vic/data/papers/spm2008-survey.pdf}, }
@InProceedings{Balet:2008:CPS, idxkey = {Terrain, TCR, MR}, idxproject = {CRIMSON}, author = {Olivier Balet and Jerome Duysens and Jerome Comptdaer and Enrico Gobbetti and Roberto Scopigno}, title = {The {CRIMSON} Project: Simulating Populations in Massive Urban Environments}, booktitle = {Proc. 8th World Congress on Computational Mechanics}, year = 2008, pages = {}, abstract = { This short paper reviews some of the results obtained within the European Project CRIMSON. }, url = {https://www.crs4.it/vic/data/papers/wccm2008-crimson.pdf}, }
@Article{Agus:2008:GAD, idxdatesub = "2007:9", idxdatepub = "2008:4", idxkey = {TOP-THEME-LIGHT-FIELD, MR, Holography, VolRen}, idxproject = {CYBERSAR,3DANATOMICALHUMAN}, author = {Marco Agus and Enrico Gobbetti and {Jos\'e Antonio} {Iglesias Guiti\'an} and Fabio Marton and Giovanni Pintore}, title = {GPU Accelerated Direct Volume Rendering on an Interactive Light Field Display}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, year = 2008, volume = 27, number = 3, pages = {231--240}, abstract = { We present a GPU accelerated volume ray casting system interactively driving a multi-user light field display. The display, driven by a single programmable GPU, is based on a specially arranged array of projectors and a holographic screen and provides full horizontal parallax. The characteristics of the display are exploited to develop a specialized volume rendering technique able to provide multiple freely moving naked-eye viewers the illusion of seeing and manipulating virtual volumetric objects floating in the display workspace. In our approach, a GPU ray-caster follows rays generated by a multiple-center-of-projection technique while sampling pre-filtered versions of the dataset at resolutions that match the varying spatial accuracy of the display. The method achieves interactive performance and provides rapid visual understanding of complex volumetric data sets even when using depth oblivious compositing techniques. }, note = {Proc. Eurographics 2008}, url = {https://www.crs4.it/vic/data/papers/eg2008-hgvr.pdf}, }
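%% Illustrative sketch for Agus:2008:GAD above (hypothetical code; one
%% plausible parametrization of multiple-center-of-projection rays, not the
%% paper's exact formulation). On a horizontal-parallax-only screen each beam
%% fans out horizontally with its own angle, while the vertical perspective is
%% tied to a fixed observer line at distance observer_z from the screen:
%%
%%   import numpy as np
%%
%%   def beam_ray(px, py, theta, observer_z):
%%       """Ray for screen point (px, py) emitting at horizontal angle theta;
%%       the screen lies in the z = 0 plane, observer line at height 0."""
%%       origin = np.array([px, py, 0.0])
%%       d = np.array([np.sin(theta),
%%                     -py * np.cos(theta) / observer_z,  # vertical slope
%%                     np.cos(theta)])
%%       return origin, d / np.linalg.norm(d)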
@Article{Bettio:2008:SRM, idxdatesub = "2004:7", idxdatepub = "2004:12", idxkey = {TOP-THEME-LIGHT-FIELD, MR, Holography, Parallel}, idxproject = {CYBERSAR}, author = {Fabio Bettio and Enrico Gobbetti and Fabio Marton and Giovanni Pintore}, title = {Scalable Rendering of Massive Triangle Meshes on Light Field Displays}, journal = {Computers \& Graphics}, publisher = pub-ELS, address = pub-ELS:adr, year = 2008, volume = {32}, number = {1}, month = feb, pages = {55-64}, abstract = { We report on a multiresolution rendering system driving light field displays based on a specially arranged array of projectors and a holographic screen. The system gives multiple freely moving naked-eye viewers the illusion of seeing and manipulating 3D objects with continuous viewer-independent parallax. Multi-resolution techniques that take into account the displayed light field geometry are employed to dynamically adapt model resolution to display capabilities and timing constraints. The approach is demonstrated on two different scales: a desktop PC driving a 7.4Mbeams TV-size display, and a cluster-parallel solution driving a large (1.6x0.9 meters) 35Mbeams display which supports a room-size working space. In both cases, massive meshes of tens of millions of triangles are manipulated at interactive rates. }, url = {https://www.crs4.it/vic/data/papers/cag2008-holo.pdf}, }
@Booklet{Yoon:2008:IMM, idxproject = {CYBERSAR,BOEING777}, title = {Interactive Massive Model Rendering}, author = {{Sung-eui} Yoon and Andreas Dietrich and Enrico Gobbetti and Dinesh Manocha and Fabio Marton and Renato Pajarola and Philipp Slusallek}, howpublished = {SIGGRAPH Asia 2008 Course Notes}, address = {SIGGRAPH Asia 2008, Singapore}, month = dec, year = 2008, thumbnail = {https://www.crs4.it/vic/data/papers/siggraphasia2008.jpg}, url = {}, }
@Booklet{Kasik:2008:MMV, idxproject = {CYBERSAR,BOEING777}, title = {Massive Model Visualization Techniques}, author = {David Kasik and Andreas Dietrich and Enrico Gobbetti and Dinesh Manocha and Fabio Marton and Philipp Slusallek and Abe Stephens and {Sung-eui} Yoon}, howpublished = {SIGGRAPH 2008 Course Notes}, address = {SIGGRAPH 2008, San Diego}, month = aug, year = 2008, thumbnail = {https://www.crs4.it/vic/data/papers/siggraph2008.jpg}, url = {}, }
%################################
%### 2007
%################################
@Booklet{Magnenat:2007:TVP, idxproject = {3DANATOMICALHUMAN}, title = {Towards the Virtual Physiological Human}, author = {Nadia Magnenat-Thalmann and Benjamin Gilles and Herv{\'e} Delingette and Andrea Giachetti and Marco Agus}, howpublished = {Eurographics 2007 Course Notes}, address = {Eurographics 2007, Prague}, month = aug, year = 2007, thumbnail = {https://www.crs4.it/vic/data/papers/eg2007.jpg}, url = {}, }
@Booklet{Kasik:2007:SAM, idxproject = {CYBERSAR,BOEING777}, title = {State of the Art in Massive Model Visualization}, author = {David Kasik and Dinesh Manocha and Abe Stephens and Beat Bruderlin and Philipp Slusallek and Andreas Dietrich and Enrico Gobbetti and Fabio Marton and Wagner Correa and Inigo Quilez}, howpublished = {SIGGRAPH 2007 Course Notes}, address = {SIGGRAPH 2007, San Diego}, month = aug, year = 2007, thumbnail = {https://www.crs4.it/vic/data/papers/siggraph2007.jpg}, }
@Article{Dietrich:2007:MMR, idxkey = {MR}, idxproject = {CYBERSAR,BOEING777}, author = {Andreas Dietrich and Enrico Gobbetti and {Sung-Eui} Yoon}, title = {Massive-Model Rendering Techniques: A
Tutorial}, journal = {IEEE Computer Graphics and Applications}, year = 2007, volume = {27}, number = {6}, month = "nov/dec", pages = {20--34}, abstract = { The currently observed exponentially increasing size of 3D models prohibits rendering them using brute force methods. Researchers have proposed various output-sensitive rendering algorithms to overcome this challenge. This article provides an overview of this technology. }, thumbnail = {https://www.crs4.it/vic/data/papers/cga07-massive.jpg}, }
@InProceedings{Brelstaff:2007:PDE, idxkey = {Holography}, idxproject = {COHERENT}, author = {Gavin Brelstaff and Marco Agus and Enrico Gobbetti and Gianluigi Zanetti}, title = {Pseudo-holographic device elicits rapid depth cues despite random-dot surface masking}, booktitle = {Perception, ECVP 2007 Abstract Supplement}, address = {Conference held in Arezzo, Italy}, pages = 202, year = 2007, volume = 36, abstract = { Experiments with random-dot masking demonstrate that, in the absence of cues mundanely available to 2-D displays (object occlusion, surface shading, perspective foreshortening, and texture gradients), Holografika's large-screen multi-projector video system (COHERENT-IST-FP6-510166) elicits useful stereoscopic and motion-parallax depth cues, and does so in under 2 s. We employed a simplified version of Julesz's (c. 1971) famous spiral ramp surface: a 3-layer cylindrical wedding cake, rendered via an OpenGL model that subjects viewed along its central axis. By adjusting its parameters, two sets of model-stimuli were rendered: one with a uniform large field of depth and one where the field was effectively flat. Each of eleven pre-screened subjects completed four experiments, each consisting of eight trials in a 2IFC design whereby they indicated in which interval they perceived the greatest field of depth. The experiments tested one-eye static, one-eye head-swaying, two-eye static, and two-eye head-swaying observation--in that order. Scores also improved in that order.}, url = {https://www.crs4.it/vic/data/papers/ecvp2007-holo.pdf}, }
@Article{Cignoni:2007:RCB, idxkey = {TOP-THEME-MASSIVE-MODELS,Terrain, TCR, MR}, idxproject = {}, author = {Paolo Cignoni and Marco {Di Benedetto} and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Roberto Scopigno}, title = {Ray-Casted BlockMaps for Large Urban Models Visualization}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, year = 2007, volume = 26, number = 3, month = sep, pages = {405--413}, abstract = { We introduce a GPU-friendly technique that efficiently exploits the highly structured nature of urban environments to ensure rendering quality and interactive performance of city exploration tasks. Central to our approach is a novel discrete representation, called BlockMap, for the efficient encoding and rendering of a small set of textured buildings far from the viewer. A BlockMap compactly represents a set of textured vertical prisms with a bounded on-screen footprint. BlockMaps are stored into small fixed size texture chunks and efficiently rendered through GPU raycasting. BlockMaps can be seamlessly integrated into hierarchical data structures for interactive rendering of large textured urban models. We illustrate an efficient output-sensitive framework in which a visibility-aware traversal of the hierarchy renders components close to the viewer with textured polygons and employs BlockMaps for far-away geometry. Our approach provides a bounded-size far-distance representation of cities, naturally scales with improving shader technology, and outperforms current state-of-the-art approaches. Its efficiency and generality are demonstrated with the interactive exploration of a large textured model of the city of Paris on a commodity graphics platform. }, url = {https://www.crs4.it/vic/data/papers/eg2006-blockmaps.pdf}, note = {Proc. Eurographics 2007}, }
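%% Illustrative sketch for Cignoni:2007:RCB above (hypothetical code; a
%% constant-step marcher stands in for the exact GPU cell traversal): a
%% BlockMap is essentially a grid of textured vertical prisms, so a ray can
%% be intersected by marching over the height grid until it dips below a
%% prism top:
%%
%%   import numpy as np
%%
%%   def raycast_prisms(heights, origin, direction, step=0.05, t_max=100.0):
%%       """heights[i, j]: prism top per cell. Returns first hit or None."""
%%       p = np.asarray(origin, float)
%%       d = np.asarray(direction, float)
%%       d = d / np.linalg.norm(d)
%%       for _ in range(int(t_max / step)):
%%           p = p + step * d
%%           i, j = int(p[0]), int(p[1])            # cell under (x, y)
%%           if not (0 <= i < heights.shape[0] and 0 <= j < heights.shape[1]):
%%               return None                        # left the block footprint
%%           if p[2] <= heights[i, j]:              # below the prism top: hit
%%               return p
%%       return None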
@Article{Pajarola:2007:SSR, idxkey = {MR, Terrain}, idxproject = {CYBERSAR}, author = {Renato Pajarola and Enrico Gobbetti}, title = {Survey on Semi-Regular Multiresolution Models for Interactive Terrain Rendering}, journal = {The Visual Computer}, year = 2007, volume = 23, number = 8, pages = {583--605}, abstract = { Rendering high quality digital terrains at interactive rates requires carefully crafted algorithms and data structures able to balance the competing requirements of realism and frame rates, while taking into account the memory and speed limitations of the underlying graphics platform. In this survey, we analyze multi-resolution approaches that exploit a certain semi-regularity of the data. These approaches have produced some of the most efficient systems to date. After providing a short background and motivation for the methods, we focus on illustrating models based on tiled blocks and nested regular grids, quadtree and triangle bin-tree triangulations, as well as cluster based approaches. We then discuss LOD error metrics and system-level data management aspects of interactive terrain visualization, including dynamic scene management, out-of-core data organization and compression, as well as numerical accuracy. }, url = {https://www.crs4.it/vic/data/papers/tvc2007-semi-regular.pdf} }
@Proceedings{Gobbetti:2007:CGIM, title = {The IASTED Conference on Computer Graphics and Imaging}, year = 2007, editor = {Enrico Gobbetti}, isbn = {978-0-88986-644-7}, address = {Conference held in Innsbruck, Austria, February 13-15}, organization = {IASTED}, url = {https://www.crs4.it/vic/data/papers/IASTED-CGIM2007-Foreword.pdf} }
@InProceedings{Bettio:2007:MVM, idxkey = {MR, Holography, Parallel}, idxproject = {CYBERSAR}, author = {Fabio Bettio and Enrico Gobbetti and Fabio Marton and Giovanni Pintore}, title = {Multiresolution Visualization of Massive Models on a Large Spatial {3D} Display}, abstract = { }, booktitle = {Proc. Eurographics Symposium on Parallel Graphics and Visualization}, month = may, year = 2007, publisher = pub-EUROGRAPHICS, address = pub-EUROGRAPHICS:adr, note = {}, thumbnail = {https://www.crs4.it/vic/data/papers/egpgv07-holo.jpg}, url = {}, }
@InProceedings{Bettio:2007:HQN, idxkey = {MR, Terrain, Streaming}, idxproject = {VICTERRAIN3D, CYBERSAR}, author = {Fabio Bettio and Enrico Gobbetti and Fabio Marton and Giovanni Pintore}, title = {High-quality networked terrain rendering from compressed bitstreams}, abstract = { We describe a compressed multiresolution representation and a client-server architecture for supporting interactive high quality remote visualization of very large textured planar and spherical terrains. Our approach incrementally updates a chunked level-of-detail BDAM hierarchy by using precomputed wavelet coefficient matrices decoded from a compressed bitstream originating from a thin server.
The structure combines the aggressive compression rates of wavelet-based image representations with the ability to ensure overall geometric continuity for variable resolution views of planar and spherical terrains with no need for run-time stitching. The efficiency of the approach is demonstrated on a large scale interactive remote visualization of global and local terrains on ADSL networks. A library implementing an early version of this work has been incorporated into a widely distributed geo-viewing system with tens of thousands of clients. }, booktitle = {Proc. ACM Web3D International Symposium}, pages = {37-44}, month = apr, year = 2007, publisher = pub-ACM, address = pub-ACM:adr, url = {https://www.crs4.it/vic/data/papers/web3d2007-wsbdam.pdf} }
@InProceedings{Bettio:2007:RCS, idxkey = {Surgical}, idxproject = {}, title = {A practical vision based approach to unencumbered direct spatial manipulation in virtual worlds}, author = {Fabio Bettio and Andrea Giachetti and Enrico Gobbetti and Fabio Marton and Giovanni Pintore}, abstract = { We present a practical approach for developing interactive environments that allows humans to interact with large complex 3D models without having to manually operate input devices. The system provides support for scene manipulation based on hand tracking and gesture recognition and for direct 3D interaction with the 3D models in the display space if a suitably registered 3D display is used. Being based on markerless tracking of a user's two hands, the system does not require users to wear any input or output devices. 6DOF input is provided by using both hands simultaneously, making the tracker more robust since only tracking of position information is required. The effectiveness of the method is demonstrated with a simple application for model manipulation on a large stereo display, in which rendering constraints are met by employing state-of-the-art multiresolution techniques. }, month = feb, year = 2007, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Trento, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2007-handtracking.pdf}, }
@InProceedings{Agus:2007:MVN, idxkey = {Surgical}, idxproject = {COHERENT}, title = {Medical Visualization with New Generation Spatial {3D} Displays}, author = {Marco Agus and Fabio Bettio and Enrico Gobbetti and Giovanni Pintore}, abstract = { In this paper the capabilities of a modern spatial 3D display are exploited for medical visualization tasks. The system gives multiple viewers the illusion of seeing virtual objects floating at fixed physical locations. Using this kind of display in conjunction with 3D visualization techniques helps disambiguate complex images, proving to be a real advantage for immediate understanding and visualization of medical data. We demonstrate this by reporting on some preliminary test cases of direct volume rendering techniques~(Maximum Intensity Projection and X-ray simulation), as well as an example of a collaborative medical diagnostic application for analysis of Abdominal Aortic Aneurysms. }, month = feb, year = 2007, booktitle = {Eurographics Italian Chapter Conference}, address = {Conference held in Trento, Italy}, publisher = pub-EUROGRAPHICS, url = {https://www.crs4.it/vic/data/papers/egit2007-holomedical.pdf}, }
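%% Illustrative sketch for Agus:2007:MVN above (hypothetical code; axis-
%% aligned projections stand in for arbitrary view directions): the two
%% depth-oblivious rendering modes it reports on reduce, per ray, to a
%% maximum and to an accumulated attenuation:
%%
%%   import numpy as np
%%
%%   def mip(volume, axis=2):
%%       """Maximum Intensity Projection: brightest sample along each ray."""
%%       return volume.max(axis=axis)
%%
%%   def xray(volume, axis=2):
%%       """X-ray-like simulation: average density along each ray."""
%%       return volume.mean(axis=axis)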
%################################
%### 2006
%################################
@Booklet{Kasik:2006:SAM, idxproject = {BOEING777}, title = {Real-Time Interactive Massive Model Visualization}, author = {David Kasik and Dinesh Manocha and Abe Stephens and Beat Bruderlin and Philipp Slusallek and Enrico Gobbetti and Wagner Correa and Inigo Quilez}, howpublished = {Eurographics 2006 Course Notes}, address = {Eurographics 2006, Vienna}, month = sep, year = 2006, thumbnail = {https://www.crs4.it/vic/data/papers/eg2006.jpg}, url = {}, }
@InProceedings{Agus:2006:RTS, idxkey = {TOP-THEME-SURGICAL, Surgical}, idxproject = {EYESIM1}, author = {Marco Agus and Enrico Gobbetti and Giovanni Pintore and Gianluigi Zanetti and Antonio Zorcolo}, title = {Real Time Simulation of Phaco-emulsification for Cataract Surgery Training}, editor = {}, booktitle = {Workshop in Virtual Reality Interactions and Physical Simulations (VRIPHYS 2006)}, pages = {}, publisher = pub-EUROGRAPHICS, address = {}, note = {Conference held in Madrid, Spain, November 6-7}, isbn = {}, month = nov, year = {2006}, abstract = { We present a real-time simulation of the phaco-emulsification task in a virtual reality training system for cataract surgery. Phaco-emulsification consists of breaking the eye's crystalline lens into small fragments and completely removing it by employing an ultrasound tool called a phaco-emulsificator. Our approach employs a mesh-less shape-based dynamic algorithm integrated with a simplex geometry representation in order to efficiently handle the rendering process and the continuous modifications induced by the surgical tool interaction, and with a smoothed particle hydrodynamics scheme with spatial ordering for handling fragment interactions. The complete training system also simulates other tasks involved in cataract surgery, like the corneal incision and the capsulorhexis. The simulator runs on a multiprocessing PC platform and provides realistic physically-based visual simulations of tool interactions. The current setup employs a SensAble PHANToM for simulating the interaction devices, and a binocular display for presenting images to the user. }, url = {https://www.crs4.it/vic/data/papers/vriphys2006-eyesim.pdf}, }
@Article{Gobbetti:2006:CCB, idxkey = {TOP-THEME-MASSIVE-MODELS,Terrain, TCR, MR}, idxproject = {CRIMSON}, author = {Enrico Gobbetti and Fabio Marton and Paolo Cignoni and Marco Di Benedetto and Fabio Ganovelli}, title = {{C-BDAM} -- Compressed Batched Dynamic Adaptive Meshes for Terrain Rendering}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, year = 2006, volume = 25, number = 3, month = sep, pages = {333-342}, abstract = { We describe a compressed multiresolution representation for supporting interactive rendering of very large planar and spherical terrain surfaces. The technique, called Compressed Batched Dynamic Adaptive Meshes (\textit{C-BDAM}), is an extension of the BDAM and P-BDAM chunked level-of-detail hierarchies. In the C-BDAM approach, all patches share the same regular triangulation connectivity and incrementally encode their vertex attributes using a quantized representation of the difference with respect to values predicted from the coarser level. The structure provides a number of benefits: simplicity of data structures, overall geometric continuity for planar and spherical domains, support for variable resolution input data, management of multiple vertex attributes, efficient compression and fast construction times, ability to support maximum-error metrics, real-time decompression and shaded rendering with configurable variable level-of-detail extraction, and runtime detail synthesis. The efficiency of the approach and the achieved compression rates are demonstrated on a number of test cases, including the interactive visualization of a 29 gigasample reconstruction of the whole planet Earth created from high resolution SRTM data. }, url = {https://www.crs4.it/vic/data/papers/eg2006-cbdam.pdf}, note = {Proc. Eurographics 2006}, }
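%% Illustrative sketch for Gobbetti:2006:CCB above (hypothetical code; the
%% paper's predictor is more sophisticated than the nearest-neighbor
%% upsampling used here): each refinement level stores only uniformly
%% quantized residuals against a prediction from the coarser level, which
%% bounds the maximum reconstruction error by step/2:
%%
%%   import numpy as np
%%
%%   def upsample(coarse, shape):
%%       return coarse.repeat(2, 0).repeat(2, 1)[:shape[0], :shape[1]]
%%
%%   def encode_level(fine, coarse, step=0.5):
%%       r = fine - upsample(coarse, fine.shape)
%%       return np.round(r / step).astype(np.int16)   # entropy-code these
%%
%%   def decode_level(q, coarse, step=0.5):
%%       return upsample(coarse, q.shape) + q.astype(np.float64) * step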
@InProceedings{Balogh:2006:IMH,
  idxkey = {Holography},
  idxproject = {COHERENT},
  author = {Tibor Balogh and Zsuzsa Dobranyi and Tamas Forgacs and Attila Molnar and Laszlo Szloboda and Enrico Gobbetti and Fabio Marton and Fabio Bettio and Giovanni Pintore and Gianluigi Zanetti and Eric Bouvier and Reinhard Klein},
  title = {An Interactive Multi-User Holographic Environment},
  series = {Annual Conference Series},
  pages = {},
  booktitle = {SIGGRAPH 2006 Emerging Technologies Proceedings},
  year = {2006},
  organization = {ACM SIGGRAPH},
  publisher = pub-AW,
  month = aug,
  address = {Conference held in Boston, MA, USA},
  abstract = {We present an interactive multi-user holographic environment that allows freely moving naked eye participants to share a large 3D scene with fully continuous, observer independent parallax.},
  url = {https://www.crs4.it/vic/data/papers/sig2006-etech-holo.pdf}
}

@InProceedings{Agocs:2006:LSI,
  idxkey = {Holography},
  idxproject = {COHERENT},
  author = {Tibor Agocs and Tibor Balogh and Tamas Forgacs and Fabio Bettio and Enrico Gobbetti and Gianluigi Zanetti},
  title = {A Large Scale Interactive Holographic Display},
  booktitle = {Proc. IEEE VR 2006 Workshop on Emerging Display Technologies},
  abstract = {Our work focuses on the development of interactive multi-user holographic displays that allow freely moving naked eye participants to share a three dimensional scene with fully continuous, observer independent parallax. Our approach is based on a scalable design that exploits a specially arranged array of projectors and a holographic screen. The feasibility of such an approach has already been demonstrated with a working hardware and software 7.4M pixel prototype driven at 10-15Hz by two DVI streams. In this short contribution, we illustrate our progress, presenting a 50M pixel display prototype driven by a dedicated cluster hosting multiple consumer level graphics cards.},
  year = 2006,
  address = {Conference held in Alexandria, VA, USA, March 26 2006},
  note = {CD ROM Proceedings},
  url = {https://www.crs4.it/vic/data/papers/ieeevr2006ws-holo.pdf}
}
@InProceedings{Agus:2006:RCS,
  idxkey = {Surgical},
  idxproject = {EYESIM1},
  title = {Real-time Cataract Surgery Simulation for Training},
  author = {Marco Agus and Enrico Gobbetti and Giovanni Pintore and Gianluigi Zanetti and Antonio Zorcolo},
  year = 2006,
  booktitle = {Eurographics Italian Chapter Conference},
  address = {Conference held in Catania, Italy},
  publisher = pub-EUROGRAPHICS,
  url = {https://www.crs4.it/vic/data/papers/egit2006-eyesim.pdf}
}

@InCollection{Bettio:2006:HCM,
  idxdatesub = "2005:07",
  idxdatepub = "2006:01",
  idxkey = {Holo, Surgical},
  idxproject = {COHERENT},
  author = {Fabio Bettio and Francesca Frexia and Andrea Giachetti and Enrico Gobbetti and Giovanni Pintore and Gianluigi Zanetti and Tibor Balogh and Tamas Forgacs and Tibor Agocs and Eric Bouvier},
  title = {A Holographic Collaborative Medical Visualization System},
  editor = {J. D. Westwood},
  booktitle = {Medicine Meets Virtual Reality 2006},
  publisher = pub-IOS,
  address = pub-IOS:adr,
  pages = {},
  isbn = {},
  month = jan,
  year = {2006},
  abstract = {We report on our work on the development of a novel holographic display technology, capable of targeting multiple freely moving naked eye viewers, and of a demonstrator exploiting this technology to provide medical specialists with a truly interactive collaborative 3D environment for diagnostic discussions and/or pre-operative planning.},
  url = {https://www.crs4.it/vic/data/papers/mmvr-2006.pdf},
}

%################################
%### 2005
%################################

@InProceedings{Gobbetti:2005:CRH,
  idxdatesub = "2005:07",
  idxdatepub = "2005:11",
  idxkey = {TCR, Terrain},
  idxproject = {CRIMSON},
  author = {Enrico Gobbetti and Fabio Marton},
  title = {Compression and rendering of high resolution planetary scale digital elevation models},
  editor = {},
  booktitle = {Proceedings of the Fifth MIMOS Conference},
  publisher = {},
  address = {Conference held in Turin, Italy, November 3--5, 2005},
  pages = {},
  isbn = {},
  month = nov,
  year = {2005},
  abstract = {In this contribution, we illustrate a technique for incorporating aggressive compression methods in the BDAM adaptive resolution framework for terrain rendering. The new structure provides a number of benefits: simplicity of data structures, overall geometric continuity, efficient compression and fast construction times, real-time decompression and rendering with configurable variable level-of-detail extraction, and runtime detail synthesis.},
  url = {https://www.crs4.it/vic/data/papers/mimos2005-compressed-terrains.pdf},
  note = {CD ROM Proceedings}
}

@InProceedings{Gobbetti:2005:IEG,
  idxdatesub = "2005:07",
  idxdatepub = "2005:11",
  idxkey = {TCR, Terrain, MR},
  idxproject = {},
  author = {Enrico Gobbetti and Fabio Marton},
  title = {Interactive exploration of gigantic geometric models on commodity graphics platforms},
  editor = {},
  booktitle = {Proceedings of the Fifth MIMOS Conference},
  publisher = {},
  address = {Conference held in Turin, Italy, November 3--5, 2005},
  pages = {},
  isbn = {},
  month = nov,
  year = {2005},
  url = {https://www.crs4.it/vic/data/papers/mimos2005-large-models.pdf},
  abstract = {Many important application domains, including remote sensing, 3D scanning, computer aided design, and numerical simulation, require the interactive inspection of huge geometric models. Despite the rapid improvement in hardware performance, rendering today's multi-gigabyte datasets at interactive rates largely overloads the performance and memory capacity of state-of-the-art hardware platforms. To overcome this limitation, researchers have proposed a wide variety of output-sensitive rendering algorithms, i.e., rendering techniques whose runtime and memory footprint are proportional to the number of image pixels, not to the total model complexity. In this contribution, we illustrate our work on a new breed of techniques that are particularly well suited to harness the power of current graphics hardware.},
  note = {CD ROM Proceedings}
}
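% ----------------------------------------------------------------------
% The entry above defines output-sensitive techniques as those whose cost
% tracks image pixels rather than model size. The usual gatekeeper is a
% screen-space error test like the sketch below: refine a node only while
% its object-space error projects to more than a pixel tolerance
% (needs_refinement and its parameters are illustrative, not from the talk).
%
%   #include <cmath>
%
%   bool needs_refinement(float object_error, float distance,
%                         float screen_height_px, float fov_y_rad,
%                         float tolerance_px) {
%       // Project an object-space length at 'distance' to pixels.
%       float projected = object_error * screen_height_px /
%                         (2.0f * distance * std::tan(0.5f * fov_y_rad));
%       return projected > tolerance_px;
%   }
% ----------------------------------------------------------------------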
@InProceedings{Bettio:2005:CIV,
  idxdatesub = "2005:07",
  idxdatepub = "2005:11",
  idxkey = {TCR, Surgical, Volren},
  idxproject = {},
  author = {Fabio Bettio and Francesca Frexia and Enrico Gobbetti and Giovanni Pintore and Gianluigi Zanetti and Tibor Balogh and Tamas Forgacs and Tibor Agocs and Eric Bouvier},
  title = {Collaborative immersive visualization without goggles -- experiences in developing a holographic display system for medical applications},
  editor = {},
  booktitle = {Proceedings of the Fifth MIMOS Conference},
  publisher = {},
  address = {Conference held in Turin, Italy, November 3--5, 2005},
  pages = {},
  isbn = {},
  month = nov,
  year = {2005},
  abstract = {In this contribution, we report on the development of a novel holographic display technology that targets multiple freely moving naked eye viewers, and of a collaborative medical application prototype that aims at exploiting this technology to provide medical specialists with a truly interactive 3D collaborative environment for diagnostic discussions and/or pre-operative planning.},
  url = {https://www.crs4.it/vic/data/papers/mimos2005-holo.pdf},
  note = {CD ROM Proceedings}
}

@Booklet{Gobbetti:2005:MGC,
  idxstatus = "Lecture",
  idxmedium = "Text",
  idxkey = {MR, TCR},
  idxproject = {CRIMSON},
  title = {Interactive Rendering of Massive Geometric Models},
  author = {Enrico Gobbetti},
  howpublished = {Tutorial notes, Eurographics Italy},
  address = {Conference held in Pisa, Italy, February 17--18, CDROM Proceedings},
  month = feb,
  year = 2005,
  url = {https://www.crs4.it/vic/data/papers/egit-2005-massive.pdf},
  thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg},
}

@InCollection{Bettio:2005:3FM,
  idxdatesub = "2005:03",
  idxdatepub = "2005:09",
  idxkey = {Surgical, Volren},
  idxproject = {URBAN},
  author = {Fabio Bettio and Francesca Frexia and Andrea Giachetti and Enrico Gobbetti and Giovanni Pintore and Gianluigi Zanetti},
  title = {{3D} Functional Models of Monkey Brain through Elastic Registration of Histological Sections},
  editor = {},
  booktitle = {International Conference on Image Analysis and Processing},
  publisher = pub-SV,
  address = pub-SV:adr,
  series = ser-LNCS,
  volume = {},
  isbn = {},
  pages = {},
  month = sep,
  year = 2005,
  abstract = {In this paper we describe a method for the reconstruction and visualization of functional models of monkey brains. Models are built through the registration of high resolution images, obtained from the scanning of histological sections, with reference photos taken during the brain slicing. From the histological sections it is also possible to acquire specifically activated neuron coordinates, introducing functional information into the model. Due to the specific nature of the images (texture information is useless, and the sections could be deformed when they were cut and placed on glass), we solved the registration problem by extracting corresponding cerebral cortex borders (with a snake algorithm) and computing from their deformation an image transform, modeled as an affine deformation plus a non-linear field evaluated as an elastically constrained deformation minimizing contour distances. Registered images and contours are then used to build 3D models of specific brains with a software tool allowing the interactive visualization of cortical volumes, together with the spatially referenced neurons, classified and differently colored according to their functionalities.},
  note = {Proc. ICIAP 2005, Cagliari, Italy, September 6--8, 2005},
  url = {https://www.crs4.it/vic/data/papers/iciap05-brain.pdf},
}
@InProceedings{Cignoni:2005:GFM,
  idxdatesub = "2005:03",
  idxdatepub = "2005:10",
  idxkey = {TOP-THEME-MESHES, Terrain, TCR, MR},
  idxproject = {},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {Batched Multi Triangulation},
  booktitle = {Proceedings IEEE Visualization},
  year = 2005,
  address = {Conference held in Minneapolis, MN, USA},
  month = oct,
  publisher = pub-IEEE,
  pages = {207-214},
  abstract = {The Multi Triangulation framework (MT) is a very general approach for managing adaptive resolution in triangle meshes. The key idea is arranging mesh fragments at different resolutions in a Directed Acyclic Graph (DAG) which encodes the dependencies between fragments, thereby encompassing a wide class of multiresolution approaches that use hierarchies or DAGs with predefined topology. On current architectures, however, the classic MT is unfit for real-time rendering, since DAG traversal costs vastly dominate raw rendering costs. In this paper, we redesign the MT framework in a GPU friendly fashion, moving its granularity from triangles to precomputed optimized triangle patches. The patches can be conveniently tri-stripped and stored in secondary memory to be loaded on demand, ready to be sent to the GPU using preferential paths. In this manner, central memory only contains the DAG structure and CPU workload becomes negligible. The major contributions of this work are: a new out-of-core multiresolution framework that, just like the MT, encompasses a wide class of multiresolution structures; a robust and elegant way to build a well conditioned MT DAG by introducing the concept of V-partitions, which can encompass various state of the art multiresolution algorithms; an efficient multithreaded rendering engine; and a general subsystem for the external memory processing and simplification of huge meshes.},
  url = {https://www.crs4.it/vic/data/papers/ieeeviz2005-gpumt.pdf},
}
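% ----------------------------------------------------------------------
% For the Batched Multi Triangulation entry above: a much-simplified cut
% extraction over the patch DAG. In the real MT, patches live on arcs and
% a valid cut must respect all dependencies; here patches sit on nodes and
% a visited flag merely keeps shared nodes from being emitted twice
% (Node/extract_cut are illustrative names, not the paper's structures).
%
%   #include <vector>
%
%   struct Node {
%       float error;               // precomputed object-space error
%       int patch_id;              // tri-stripped patch stored off-core
%       std::vector<int> children; // outgoing DAG arcs
%   };
%
%   void extract_cut(const std::vector<Node>& dag, int n, float tolerance,
%                    std::vector<char>& visited, std::vector<int>& batch) {
%       if (visited[n]) return;
%       visited[n] = 1;
%       if (dag[n].error <= tolerance || dag[n].children.empty()) {
%           batch.push_back(dag[n].patch_id);   // coarse enough: render patch
%           return;
%       }
%       for (int c : dag[n].children)           // otherwise refine
%           extract_cut(dag, c, tolerance, visited, batch);
%   }
% ----------------------------------------------------------------------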
@InProceedings{Brelstaff:2005:TPE,
  idxdatesub = "2005:05",
  idxdatepub = "2005:08",
  idxkey = {Haptics, Surgical},
  idxproject = {SCACMI},
  author = {Gavin Brelstaff and Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo and Bruno Picasso and Stefano Sellari Franceschini},
  title = {Towards a psychophysical evaluation of a surgical simulator for bone-burring},
  booktitle = {Proc. Second Symposium on Applied Perception in Graphics and Visualization},
  year = 2005,
  address = {Conference held in A Coruña, Spain, August 22--26},
  month = aug,
  publisher = pub-ACM,
  pages = {139-143},
  abstract = {The CRS4 experimental bone-burr simulator implements visual and haptic effects through the incorporation of a physics-based contact model and patient-specific data. Psychophysical tests demonstrate that, despite its simplified model and its inherent technological constraints, the simulator can articulate material differences, and that its users can learn to associate virtual bone with real bone material. Tests addressed both surface probing and interior drilling tasks. We also explore a haptic contrast sensitivity function based on the model's two main parameters: an elastic constant and an erosion factor. Both parameters manifest power-law-like sensitivity, with respective exponents of around two and three. Further tests may reveal how well simulator users perceive fine differences in bone material, like those encountered while drilling through real volume boundaries.},
  url = {https://www.crs4.it/vic/data/papers/apgv05-psychophysical-burr.pdf},
}

@InProceedings{Balogh:2005:SHS,
  idxkey = {Holography},
  idxproject = {COHERENT},
  author = {Tibor Balogh and Tamas Forgacs and Tibor Agocs and Olivier Balet and Eric Bouvier and Fabio Bettio and Enrico Gobbetti and Gianluigi Zanetti},
  title = {A Scalable Hardware and Software System for the Holographic Display of Interactive Graphics Applications},
  booktitle = {EUROGRAPHICS 2005 Short Papers Proceedings},
  abstract = {We present a scalable holographic system design targeting multi-user interactive computer graphics applications. The display uses a specially arranged array of micro-displays and a holographic screen. Each point of the holographic screen emits light beams of different color and intensity in the various directions, in a controlled manner. The light beams are generated through a light modulation system arranged in a specific geometry, and the holographic screen makes the necessary optical transformation to compose these beams into a perfectly continuous 3D view. With proper software control, the light beams leaving the various pixels can be made to propagate in multiple directions, as if they were emitted from physical objects at fixed spatial locations. The display is driven by DVI streams generated by multiple consumer level graphics boards and decoded in real-time by image processing units that feed the optical modules at high refresh rates. An OpenGL compliant library running on a client PC redefines the OpenGL behavior to multicast graphics commands to server PCs, where they are re-interpreted for implementing holographic rendering. The feasibility of the approach has been successfully evaluated with a working hardware and software 7.4M pixel prototype driven at 10-15Hz by three DVI streams.},
  year = 2005,
  address = {Conference held in Dublin, Ireland, August 2005},
  url = {https://www.crs4.it/vic/data/papers/eg2005-holo.pdf}
}
@Article{Gobbetti:2005:FV,
  idxdatesub = "2005:1",
  idxdatepub = "2005:8",
  idxkey = {TOP-THEME-MASSIVE-MODELS, TCR, MR},
  idxproject = {BOEING777},
  author = {Enrico Gobbetti and Fabio Marton},
  title = {{Far Voxels} -- A Multiresolution Framework for Interactive Rendering of Huge Complex {3D} Models on Commodity Graphics Platforms},
  journal = {ACM Transactions on Graphics},
  publisher = pub-ACM,
  address = pub-ACM:adr,
  year = 2005,
  volume = 24,
  number = 3,
  month = aug,
  pages = {878--885},
  abstract = {We present an efficient approach for end-to-end out-of-core construction and interactive inspection of very large arbitrary surface models. The method tightly integrates visibility culling and out-of-core data management with a level-of-detail framework. At preprocessing time, we generate a coarse volume hierarchy by binary space partitioning the input triangle soup. Leaf nodes partition the original data into chunks of a fixed maximum number of triangles, while inner nodes are discretized into a fixed number of cubical voxels. Each voxel contains a compact direction dependent approximation of the appearance of the associated volumetric subpart of the model when viewed from a distance. The approximation is constructed by a visibility aware algorithm that fits parametric shaders to samples obtained by casting rays against the full resolution dataset. At rendering time, the volumetric structure, maintained off-core, is refined and rendered in front-to-back order, exploiting vertex programs for GPU evaluation of view-dependent voxel representations, hardware occlusion queries for culling occluded subtrees, and asynchronous I/O for detecting and avoiding data access latencies. Since the granularity of the multiresolution structure is coarse, data management, traversal, and occlusion culling costs are amortized over many graphics primitives. The efficiency and generality of the approach are demonstrated with the interactive rendering of extremely complex heterogeneous surface models on current commodity graphics platforms.},
  url = {https://www.crs4.it/vic/data/papers/sig2005-farvox.pdf},
  note = {Proc. SIGGRAPH 2005}
}

@InProceedings{Balogh:2005:SHD,
  idxkey = {Holography},
  idxproject = {COHERENT},
  author = {Tibor Balogh and Tamas Forgacs and Olivier Balet and Eric Bouvier and Fabio Bettio and Enrico Gobbetti and Gianluigi Zanetti},
  title = {A Scalable Holographic Display for Interactive Graphics Applications},
  booktitle = {Proc. IEEE VR 2005 Workshop on Emerging Display Technologies},
  abstract = {We present a scalable holographic system design targeting multi-user interactive computer graphics applications. The display device is based on back-projection technology and uses a specially arranged array of microdisplays and a holographic screen. The display is driven by DVI streams generated by multiple consumer level graphics boards and decoded in real-time by image processing units that feed the optical modules at high refresh rates. An OpenGL compliant library running on a client PC redefines the OpenGL behavior to multicast graphics commands to server PCs, where they are reinterpreted in order to implement holographic rendering. The feasibility of the approach is demonstrated with a working hardware and software 7.4M pixel prototype driven at 10-15Hz by two DVI streams.},
  year = 2005,
  address = {Conference held in Bonn, Germany, March 13 2005},
  note = {CD ROM Proceedings},
  url = {https://www.crs4.it/vic/data/papers/ieeevr2005ws-holo.pdf}
}
%################################
%### 2004
%################################

@InProceedings{Agus:2004:HAD,
  idxdatesub = "2004:09",
  idxdatepub = "2004:06",
  idxkey = {TCR, VOLREN},
  idxproject = {},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Hardware-Accelerated Dynamic Volume Rendering for Real-Time Surgical Simulation},
  editor = {},
  booktitle = {Workshop in Virtual Reality Interactions and Physical Simulations (VRIPHYS 2004)},
  pages = {},
  publisher = {},
  address = {},
  note = {Conference held in Colima, Mexico, September 20-21, 2004},
  isbn = {},
  month = sep,
  year = {2004},
  abstract = {We developed a direct volume rendering technique that supports low-latency real-time visual feedback in parallel with physical simulation on commodity graphics platforms. In our approach, a fast approximation of the diffuse shading equation is computed on the fly by the graphics pipeline directly from the scalar data. We do this by exploiting the possibilities offered by multi-texturing with the register combiner OpenGL extension, which provides a configurable means to determine per-pixel fragment coloring. The effectiveness of our approach, which supports a full decoupling of simulation and rendering, is demonstrated in a training system for temporal bone surgery.},
  url = {https://www.crs4.it/vic/data/papers/vriphys2004-dvr.pdf},
}

@Article{Gobbetti:2004:lpcb,
  idxdatesub = "2004:7",
  idxdatepub = "2004:12",
  idxkey = {TOP-THEME-MESHES, TCR, MR},
  idxproject = {},
  author = {Enrico Gobbetti and Fabio Marton},
  title = {Layered Point Clouds -- a Simple and Efficient Multiresolution Structure for Distributing and Rendering Gigantic Point-Sampled Models},
  journal = {Computers \& Graphics},
  publisher = pub-ELS,
  address = pub-ELS:adr,
  year = 2004,
  volume = 28,
  number = 6,
  month = dec,
  pages = {815--826},
  abstract = {We recently introduced an efficient multiresolution structure for distributing and rendering very large point sampled models on consumer graphics platforms~\cite{Gobbetti:2004:LPC}. The structure is based on a hierarchy of precomputed object-space point clouds that are combined coarse-to-fine at rendering time to locally adapt sample densities according to the projected size in the image. The progressive block based refinement nature of the rendering traversal exploits on-board caching and object based rendering APIs, hides out-of-core data access latency through speculative prefetching, and lends itself well to incorporating backface, view frustum, and occlusion culling, as well as compression and view-dependent progressive transmission. The resulting system allows rendering of complex out-of-core models at high frame rates (over 60M rendered points/second), supports network streaming, and is fundamentally simple to implement. We demonstrate the efficiency of the approach on a number of very large models, stored on local disks or accessed through a consumer level broadband network, including a massive 234M samples isosurface generated by a compressible turbulence simulation and a 167M samples model of Michelangelo's St. Matthew. Many of the details of our framework were presented in a previous study. We here provide a more thorough exposition, as well as significant new material, including the presentation of a higher quality bottom-up construction method and additional qualitative and quantitative results.},
  url = {https://www.crs4.it/vic/data/papers/cag2004-lpc.pdf},
}
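% ----------------------------------------------------------------------
% For the Layered Point Clouds entries: the coarse-to-fine accumulation in
% one sketch. Parent clouds are always drawn; children are added only while
% the projected sample spacing stays coarser than the tolerance. For
% brevity the per-node projection is collapsed into a single px_per_unit
% factor; Cloud/refine are illustrative names, not the paper's API.
%
%   #include <vector>
%
%   struct Cloud {
%       float spacing;              // object-space spacing of this cloud
%       int cloud_id;               // block of a few thousand samples
%       std::vector<int> children;
%   };
%
%   void refine(const std::vector<Cloud>& tree, int n, float px_per_unit,
%               float tolerance_px, std::vector<int>& to_render) {
%       to_render.push_back(tree[n].cloud_id);        // coarse samples first
%       if (tree[n].spacing * px_per_unit <= tolerance_px) return;
%       for (int c : tree[n].children)
%           refine(tree, c, px_per_unit, tolerance_px, to_render);
%   }
% ----------------------------------------------------------------------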
@TechReport{Bouvier:2004:SS,
  idxdatesub = "2004:12",
  idxdatepub = "2004:12",
  idxkey = {HOLO, TCR, MR},
  idxproject = {COHERENT},
  title = {Communication and Display SDK},
  author = {Eric Bouvier and Fabio Bettio and Enrico Gobbetti and Nicolas Baudrey and Romain Maurer and David Sanchez and Laurent Philippon and Tamas Forgacs and Peter Breuer},
  type = {Deliverable},
  number = {D9},
  institution = {EU Project COHERENT (IST-FP6-510166)},
  month = dec,
  year = 2004,
  url = {},
}

@TechReport{Agocs:2004:SS,
  idxdatesub = "2004:08",
  idxdatepub = "2004:08",
  idxkey = {HOLO, TCR, MR},
  idxproject = {COHERENT},
  title = {System Specification},
  author = {Tibor Agocs and Tibor Balogh and Eric Bouvier and Enrico Gobbetti and Gianluigi Zanetti},
  type = {Deliverable},
  number = {D8},
  institution = {EU Project COHERENT (IST-FP6-510166)},
  month = aug,
  year = 2004,
  url = {},
}

@TechReport{Gobbetti:2004:IUR,
  idxdatesub = "2004:05",
  idxdatepub = "2004:05",
  idxkey = {HOLO, TCR, MR},
  idxproject = {COHERENT},
  title = {Initial User Requirement Definition},
  author = {Enrico Gobbetti and Gianluigi Zanetti},
  type = {Deliverable},
  number = {D4},
  institution = {EU Project COHERENT (IST-FP6-510166)},
  month = apr,
  year = 2004,
  url = {},
}

@TechReport{Cignoni:2004:TCC,
  idxdatesub = "2004:06",
  idxdatepub = "2004:06",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  title = {Time critical components - final release},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  type = {Deliverable},
  number = {D5.2.2},
  institution = {EU Project V-PLANET (IST-2000-28095)},
  month = jun,
  year = 2004,
  url = {},
}

@InProceedings{Gobbetti:2004:LPC,
  idxdatesub = "2000:12",
  idxdatepub = "2001:05",
  idxkey = {TCR},
  idxproject = {VPLANET},
  author = {Enrico Gobbetti and Fabio Marton},
  title = {Layered Point Clouds},
  editor = {Marc Alexa and Markus Gross and Hanspeter Pfister and Szymon Rusinkiewicz},
  booktitle = {Eurographics Symposium on Point Based Graphics},
  pages = {113--120, 227},
  publisher = pub-EUROGRAPHICS,
  address = pub-EUROGRAPHICS:adr,
  note = {Conference held in Zurich, Switzerland, June 2--5, 2004},
  isbn = {3-905673-09-6},
  month = jun,
  year = {2004},
  abstract = {We present a simple point-based multiresolution structure for interactive visualization of very large point sampled models on consumer graphics platforms. The structure is based on a hierarchy of precomputed object-space point clouds. At rendering time, the clouds are combined coarse-to-fine with a top-down structure traversal to locally adapt sample densities according to the projected size in the image. Since each cloud is made of a few thousand samples, the multiresolution extraction cost is amortized over many graphics primitives, and host-to-graphics communication effectively exploits on-board caching and object based rendering APIs. The progressive block based refinement nature of the rendering traversal is well suited to hiding out-of-core data access latency, and lends itself well to incorporating backface, view frustum, and occlusion culling, as well as compression and view-dependent progressive transmission. The resulting system allows rendering of complex models at high frame rates (over 60M splats/second), supports network streaming, and is fundamentally simple to implement.},
  url = {https://www.crs4.it/vic/data/papers/spbg04-lpc.pdf},
}
@Article{Cignoni:2004:ATE,
  idxdatesub = "2004:1",
  idxdatepub = "2004:8",
  idxkey = {TOP-THEME-MESHES, TCR, MR},
  idxproject = {VPLANET},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {Adaptive {TetraPuzzles} -- Efficient Out-of-core Construction and Visualization of Gigantic Polygonal Models},
  journal = {ACM Transactions on Graphics},
  publisher = pub-ACM,
  address = pub-ACM:adr,
  year = 2004,
  volume = 23,
  number = 3,
  month = aug,
  pages = {796--803},
  abstract = {We describe an efficient technique for out-of-core construction and accurate view-dependent visualization of very large surface models. The method uses a regular conformal hierarchy of tetrahedra to spatially partition the model. Each tetrahedral cell contains a precomputed simplified version of the original model, represented using cache coherent indexed strips for fast rendering. The representation is constructed during a fine-to-coarse simplification of the surface contained in diamonds (sets of tetrahedral cells sharing their longest edge). The construction preprocess operates out-of-core and parallelizes nicely. Appropriate boundary constraints are introduced in the simplification to ensure that all conforming selective subdivisions of the tetrahedron hierarchy lead to correctly matching surface patches. For each frame at runtime, the hierarchy is traversed coarse-to-fine to select diamonds of the appropriate resolution given the view parameters. The resulting system can interactively render high quality views of out-of-core models of hundreds of millions of triangles at over 40Hz (or 70M triangles/s) on current commodity graphics platforms.},
  url = {https://www.crs4.it/vic/data/papers/sig2004-tetrapuzzles.pdf},
  note = {Proc. SIGGRAPH 2004}
}

@PhdThesis{Agus:2004:HVS,
  author = {Marco Agus},
  title = {Haptic and Visual Simulation of Bone Dissection},
  school = {Dept. of Mechanical Engineering, University of Cagliari, Italy},
  year = 2004,
  url = {https://www.crs4.it/vic/data/papers/2004-phd-agus-virtual_bone_dissection.pdf},
  abstract = {In bone dissection virtual simulation, force restitution represents the key to realistically mimicking a patient-specific operating environment. The force is rendered using haptic devices controlled by parametrized mathematical models that represent the bone-burr contact. This dissertation presents and discusses a haptic simulation of a bone cutting burr, which is being developed as a component of a training system for temporal bone surgery. A physically based model was used to describe the burr-bone interaction, including haptic force evaluation, the bone erosion process, and the resulting debris. The model was experimentally validated and calibrated by employing a custom experimental setup consisting of a force-controlled robot arm holding a high-speed rotating tool and a contact force measuring apparatus. Psychophysical testing was also carried out to assess individual reaction to the haptic environment. The results suggest that the simulator is capable of rendering the basic material differences required for bone burring tasks. The current implementation, directly operating on a voxel discretization of patient-specific 3D CT and MR imaging data, is efficient enough to provide real-time haptic and visual feedback on a low-end multi-processing PC platform.}
}
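% ----------------------------------------------------------------------
% The thesis entry above calibrates a bone-burr contact model around two
% knobs: an elastic constant and an erosion factor. A deliberately toy
% voxel sketch that mirrors just those knobs (VoxelGrid/burr_step and the
% force law are illustrative, not the validated physics of the thesis):
% every bone voxel inside the spherical tip pushes back elastically and
% loses material at the erosion rate.
%
%   #include <cmath>
%   #include <cstddef>
%   #include <vector>
%
%   struct VoxelGrid {
%       int nx, ny, nz;
%       std::vector<float> density;   // remaining bone "mass" per voxel
%       float& at(int x, int y, int z) {
%           return density[((std::size_t)z * ny + y) * nx + x];
%       }
%   };
%
%   void burr_step(VoxelGrid& g, float cx, float cy, float cz, float r,
%                  float k_elastic, float k_erosion, float dt,
%                  float& fx, float& fy, float& fz) {
%       fx = fy = fz = 0.0f;
%       for (int z = (int)(cz - r); z <= (int)(cz + r); ++z)
%       for (int y = (int)(cy - r); y <= (int)(cy + r); ++y)
%       for (int x = (int)(cx - r); x <= (int)(cx + r); ++x) {
%           if (x < 0 || y < 0 || z < 0 || x >= g.nx || y >= g.ny || z >= g.nz)
%               continue;
%           float dx = cx - x, dy = cy - y, dz = cz - z;
%           float d = std::sqrt(dx*dx + dy*dy + dz*dz);
%           if (d >= r || d == 0.0f || g.at(x, y, z) <= 0.0f) continue;
%           float push = k_elastic * (r - d) * g.at(x, y, z);  // elastic term
%           fx += push * dx / d; fy += push * dy / d; fz += push * dz / d;
%           g.at(x, y, z) = std::fmax(0.0f, g.at(x, y, z) - k_erosion * dt);
%       }
%   }
% ----------------------------------------------------------------------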
@InProceedings{Agus:2004:PBB,
  idxdatesub = "2003:8",
  idxdatepub = "2004:04",
  idxkey = {Haptics, Surgical},
  idxproject = {SCACMI},
  author = {Marco Agus and Gavin Brelstaff and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo and Bruno Picasso and Stefano Sellari Franceschini},
  title = {Physics-based burr haptic simulation: tuning and evaluation},
  booktitle = {Proc. 12th International Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems (HAPTICS04)},
  year = 2004,
  address = {Conference held in Chicago, IL, USA},
  month = apr,
  publisher = pub-IEEE,
  pages = {128--135},
  abstract = {In this paper we provide a preliminary report on our work on the tuning of a temporal bone surgical simulator using parameter values derived from experimental measurements, and on the comparison between these results and the previously used domain expert assigned values. Our preliminary results indicate that the parameter values defined by the domain experts are consistent with the experimentally derived values. Psychophysical testing indicates that the simulator is capable of rendering the basic material differences required for bone burring work and that some trained users preferentially associate a simulated temporal bone resin model with its real counterpart.},
  url = {https://www.crs4.it/vic/data/papers/haptics04-burr.pdf},
  note = {Best Student Paper Award},
}

%################################
%### 2003
%################################

@TechReport{Cignoni:2003:TCC,
  idxdatesub = "2003:07",
  idxdatepub = "2003:07",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  title = {Time critical components - first release},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  type = {Deliverable},
  number = {D5.2.1},
  institution = {EU Project V-PLANET (IST-2000-28095)},
  month = jun,
  year = 2003,
  url = {},
}

@Booklet{Gobbetti:2003:MGC,
  idxstatus = "Lecture",
  idxmedium = "Text",
  idxkey = {MR, TCR},
  idxproject = {VPLANET},
  title = {Multiresolution Graphics on Commodity Graphics Platforms},
  author = {Enrico Gobbetti},
  howpublished = {Tutorial notes, Eurographics Italy},
  address = {Conference held in Milan, Italy, September 25--26, CDROM Proceedings},
  month = sep,
  year = 2003,
  url = {https://www.crs4.it/vic/data/papers/egit-2003-full.pdf},
  thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg},
}

@InCollection{Cignoni:2003:IOV,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:12",
  idxdatepub = "2003:11",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {Interactive Out-of-core Visualization of Very Large Landscapes on Commodity Graphics Platforms},
  editor = {Olivier Balet and G{\'e}rard Subsol and Patrice Torguet},
  booktitle = {International Conference on Virtual Storytelling},
  publisher = pub-SV,
  address = pub-SV:adr,
  series = ser-LNCS,
  volume = {2897},
  isbn = {3-540-20535-7},
  pages = {21--29},
  month = nov,
  year = 2003,
  abstract = {We recently introduced an efficient technique for out-of-core rendering and management of large textured landscapes. The technique, called Batched Dynamic Adaptive Meshes (BDAM), is based on a paired tree structure: a tiled quadtree for texture data and a pair of bintrees of small triangular patches for the geometry. These small patches are TINs that are constructed and optimized off-line with high quality simplification and tristripping algorithms. Hierarchical view frustum culling and view-dependent texture/geometry refinement is performed at each frame with a stateless traversal algorithm that renders a continuous adaptive terrain surface by assembling out of core data. Thanks to the batched CPU/GPU communication model, the proposed technique is not processor intensive and fully harnesses the power of current graphics hardware. This paper summarizes the method and discusses the results obtained in a virtual fly-through over a textured digital landscape derived from aerial imaging.},
  note = {Proc. Second International Conference, ICVS 2003, Toulouse, France, November 20--21, 2003},
  url = {https://www.crs4.it/vic/data/papers/icvs-2003.pdf},
}
@InProceedings{Cignoni:2003:BMH,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2003:3",
  idxdatepub = "2003:10",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {Batching Meshes for High Performance Terrain Visualization},
  booktitle = {Proceedings Eurographics Italy},
  year = 2003,
  address = {Conference held in Milan, Italy, September 25--26},
  month = sep,
  note = {CDROM Proceedings},
  abstract = {},
  url = {},
}

@InProceedings{Cignoni:2003:PBD,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2003:3",
  idxdatepub = "2003:10",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {Planet-Sized Batched Dynamic Adaptive Meshes (P-BDAM)},
  booktitle = {Proceedings IEEE Visualization},
  year = 2003,
  address = {Conference held in Seattle, WA, USA},
  month = oct,
  publisher = pub-IEEE,
  pages = {147--155},
  abstract = {This paper describes an efficient technique for out-of-core management and interactive rendering of planet sized textured terrain surfaces. The technique, called P-Batched Dynamic Adaptive Meshes (P-BDAM), is based on the BDAM structure. Data is partitioned into a set of BDAM tiles, each of them consisting of a pair of geometry bintrees of small triangular patches and an associated texture quadtree. Each triangular patch is a general triangulation of points on a displaced triangle. The proposed framework introduces several advances with respect to the state of the art: thanks to a batched host-to-graphics communication model, we outperform current adaptive tessellation solutions in terms of rendering speed; we guarantee overall geometric continuity, exploiting programmable graphics hardware to cope with the accuracy issues introduced by single precision floating point arithmetic; we exploit a compressed out of core representation and speculative prefetching for hiding disk latency during rendering of out-of-core data; and we efficiently construct high quality simplified representations with a novel distributed out of core simplification algorithm working on a standard PC network.},
  url = {https://www.crs4.it/vic/data/papers/ieeeviz03-pbdam.pdf},
}
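% ----------------------------------------------------------------------
% P-BDAM's remark above about programmable hardware coping with single
% precision accuracy on planet-sized coordinates is commonly realized with
% "double-single" arithmetic: each coordinate is an unevaluated sum of two
% floats. A CPU sketch of the standard error-free TwoSum kernel and a
% double-single add; this is an assumption about the flavour of trick
% involved, not the paper's shader code (compile without fast-math so the
% rounding error survives).
%
%   struct dsfloat { float hi, lo; };
%
%   // Knuth's TwoSum: hi + lo exactly equals a + b.
%   dsfloat two_sum(float a, float b) {
%       float s = a + b;
%       float v = s - a;
%       float e = (a - (s - v)) + (b - v);
%       return {s, e};
%   }
%
%   dsfloat ds_add(dsfloat a, dsfloat b) {
%       dsfloat s = two_sum(a.hi, b.hi);
%       float lo = s.lo + a.lo + b.lo;
%       return two_sum(s.hi, lo);   // renormalize to (hi, lo)
%   }
% ----------------------------------------------------------------------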
@Article{Cignoni:2003:BBD,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2003:1",
  idxdatepub = "2003:8",
  idxkey = {TOP-THEME-MASSIVE-MODELS, Terrain, TCR, MR},
  idxproject = {VPLANET},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  title = {{BDAM} -- Batched Dynamic Adaptive Meshes for High Performance Terrain Visualization},
  journal = j-CG-FORUM,
  publisher = pub-BLACKWELL,
  address = pub-BLACKWELL:adr,
  year = 2003,
  volume = 22,
  number = 3,
  month = sep,
  pages = {505--514},
  abstract = {This paper describes an efficient technique for out-of-core rendering and management of large textured terrain surfaces. The technique, called Batched Dynamic Adaptive Meshes (\textit{BDAM}), is based on a paired tree structure: a tiled quadtree for texture data and a pair of bintrees of small triangular patches for the geometry. These small patches are TINs that are constructed and optimized off-line with high quality simplification and tristripping algorithms. Hierarchical view frustum culling and view-dependent texture and geometry refinement is performed at each frame with a stateless traversal algorithm that renders a continuous adaptive terrain surface by assembling out of core data. Thanks to the batched CPU/GPU communication model, the proposed technique is not processor intensive and fully harnesses the power of current graphics hardware. Both preprocessing and rendering exploit out of core techniques to be fully scalable and able to manage large terrain datasets.},
  url = {https://www.crs4.it/vic/data/papers/eg2003-bdam.pdf},
  note = {Proc. Eurographics 2003 -- Second Best Paper Award},
}

@Article{Gobbetti:2003:HHO,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2003:1",
  idxdatepub = "2003:9",
  idxkey = {Globillum},
  idxproject = {DIVERCITY},
  author = {Enrico Gobbetti and Leonardo Span\`o and Marco Agus},
  title = {Hierarchical Higher Order Face Cluster Radiosity for Global Illumination Walkthroughs of Complex Non-Diffuse Environments},
  journal = j-CG-FORUM,
  publisher = pub-BLACKWELL,
  address = pub-BLACKWELL:adr,
  year = 2003,
  volume = 22,
  number = 3,
  month = sep,
  pages = {563--572},
  abstract = {We present an algorithm for simulating global illumination in scenes composed of highly tessellated objects with diffuse or moderately glossy reflectance. The solution method is a higher order extension of the face cluster radiosity technique. It combines face clustering, multiresolution visibility, vector radiosity, and higher order bases with a modified progressive shooting iteration to rapidly produce visually continuous solutions with limited memory requirements. The output of the method is a vector irradiance map that partitions input models into areas where global illumination is well approximated using the selected basis. The programming capabilities of modern commodity graphics architectures are exploited to render illuminated models directly from the vector irradiance map, exploiting hardware acceleration for approximating view-dependent illumination during interactive walkthroughs. Using this algorithm, visually compelling global illumination solutions for scenes of over one million input polygons can be computed in minutes and examined interactively on common graphics personal computers.},
  url = {https://www.crs4.it/vic/data/papers/eg2003-hhofcr.pdf},
  note = {Proc. Eurographics 2003},
}
@TechReport{Zorcolo:2003:VVD,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2003:06",
  idxdatepub = "2003:06",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {LAPS},
  title = {Visualizzazione Volumetrica Diretta Interattiva con effetti di illuminazione mediante {Register Combiner OpenGL}},
  author = {Antonio Zorcolo and Marco Agus and Enrico Gobbetti},
  type = {},
  number = {CRS4 TR/},
  institution = inst-CRS4,
  address = inst-CRS4:adr,
  month = jun,
  year = 2003,
  url = {https://www.crs4.it/vic/data/papers/laps-tvr-report-2003.pdf},
}

@InCollection{Agus:2003:TMS,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:12",
  idxdatepub = "2003:06",
  idxkey = {Haptics, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Tracking the movement of surgical tools in a virtual temporal bone dissection simulator},
  booktitle = {Surgery Simulation and Soft Tissue Modeling},
  publisher = pub-SV,
  address = pub-SV:adr,
  series = ser-LNCS,
  pages = {102--109},
  month = jun,
  year = 2003,
  abstract = {In this paper we present the current state of our research on the simulation of temporal bone surgical procedures. We describe the results of tests performed on a virtual surgical training system for middle ear surgery. The work aims to demonstrate how expert surgeons and trainees can effectively use the system for training and assessment purposes. Preliminary kinematic and dynamic analyses of simulated mastoidectomy sessions are presented. The simulation system used is characterized by a haptic component exploiting a bone-burr contact and erosion simulation model, a direct volume rendering module, as well as a time-critical particle system to simulate secondary visual effects, such as bone debris accumulation, bleeding, irrigation, and suction.},
  note = {},
  url = {https://www.crs4.it/vic/data/papers/is4tm-2003.pdf},
}

@InCollection{Agus:2003:HMB,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:07",
  idxdatepub = "2003:01",
  idxkey = {Haptics, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Bruno Picasso and Stefano Sellari Franceschini and Gianluigi Zanetti and Antonio Zorcolo},
  title = {A haptic model of a bone-cutting burr},
  editor = {J. D. Westwood},
  booktitle = {Medicine Meets Virtual Reality 2003},
  publisher = pub-IOS,
  address = pub-IOS:adr,
  pages = {4--10},
  isbn = {},
  month = jan,
  year = {2003},
  abstract = {We describe a strategy for collecting experimental data and validating a bone-burr haptic contact model developed in a virtual surgical training system for middle ear surgery. The validation strategy is based on the analysis of data acquired during virtual and real burring sessions. Our approach involves intensive testing of the surgical simulator by expert surgeons and trainees, as well as experimental data acquisition in a controlled environment.},
  url = {https://www.crs4.it/vic/data/papers/mmvr-2003.pdf},
}
@Article{Agus:2003:RTH,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:05",
  idxdatepub = "2003:02",
  idxkey = {TOP-THEME-SURGICAL, Haptics, Surgical, Volren},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Real-time Haptic and Visual Simulation of Bone Dissection},
  journal = {Presence: Teleoperators and Virtual Environments},
  volume = {12},
  number = {1},
  month = feb,
  year = {2003},
  pages = {110--122},
  abstract = {Bone dissection is an important component of many surgical procedures. In this paper, we discuss a haptic and visual simulation of a bone cutting burr, which is being developed as a component of a training system for temporal bone surgery. We use a physically motivated model to describe the burr-bone interaction, which includes haptic force evaluation, the bone erosion process, and the resulting debris. The current implementation, directly operating on a voxel discretization of patient-specific {3D} CT and MR imaging data, is efficient enough to provide real-time feedback on a low-end multi-processing PC platform.},
  url = {https://www.crs4.it/vic/data/papers/presence-2003.pdf},
  note = {}
}

@InProceedings{Agus:2003:ATR,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:08",
  idxdatepub = "2003:03",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Adaptive techniques for real-time haptic and visual simulation of bone dissection},
  editor = {},
  booktitle = {IEEE Virtual Reality Conference},
  publisher = pub-IEEE,
  address = {Conference held in Los Angeles, CA, USA, March 22--26},
  pages = {102--109},
  isbn = {},
  month = mar,
  year = 2003,
  abstract = {Bone dissection is an important component of many surgical procedures. In this paper, we discuss adaptive techniques for providing real-time haptic and visual feedback during a virtual bone dissection simulation. The simulator is being developed as a component of a training system for temporal bone surgery. We harness the difference in complexity and frequency requirements of the visual and haptic simulations by modeling the system as a collection of loosely coupled concurrent components. The haptic component exploits a multi-resolution representation of the first two moments of the bone characteristic function to rapidly compute contact forces and determine bone erosion. The visual component uses a time-critical particle system evolution method to simulate secondary visual effects, such as bone debris accumulation, bleeding, irrigation, and suction.},
  url = {https://www.crs4.it/vic/data/papers/vr2003-burr.pdf},
  note = {}
}
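% ----------------------------------------------------------------------
% The entry above leans on a "time-critical" particle system: secondary
% effects may degrade gracefully, but must never steal the frame. A
% minimal sketch of the budgeting idea (update_within_budget is an
% illustrative name, not the simulator's API): advance as many particles
% as the budget allows and report how far we got, so the caller can resume
% from there on the next frame.
%
%   #include <chrono>
%   #include <cstddef>
%   #include <vector>
%
%   template <class Particle, class Step>
%   std::size_t update_within_budget(std::vector<Particle>& ps, Step step,
%                                    std::chrono::microseconds budget) {
%       auto t0 = std::chrono::steady_clock::now();
%       std::size_t i = 0;
%       for (; i < ps.size(); ++i) {
%           if (std::chrono::steady_clock::now() - t0 > budget) break;
%           step(ps[i]);             // integrate one particle
%       }
%       return i;                    // particles actually advanced
%   }
% ----------------------------------------------------------------------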
@TechReport{Agus:2003:IPB,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2003:01",
  idxdatepub = "2003:01",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  title = {{IERAPSI} Petrous bone surgical simulation platform},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  type = {Deliverable},
  number = {D4.2},
  institution = {EU Project IERAPSI (IST-1999-12175)},
  month = jan,
  year = 2003,
  url = {https://www.crs4.it/vic/data/papers/ierapsi-d42.pdf},
}

@TechReport{Spano:2003:EEH,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2003:04",
  idxdatepub = "2003:04",
  idxkey = {Globillum},
  idxproject = {DIVERCITY},
  title = {Empirical Evaluation of Hierarchical Higher Order Face Cluster Radiosity},
  author = {Leonardo Span\`o and Enrico Gobbetti},
  type = {},
  number = {CRS4 TR/},
  institution = inst-CRS4,
  address = inst-CRS4:adr,
  month = feb,
  year = 2003,
  url = {https://www.crs4.it/vic/data/papers/hhofcr-evaluation-tr.pdf},
  abstract = {The report evaluates our implementation of the hierarchical higher order face cluster radiosity method and compares it with hierarchical radiosity with volume clusters for input scenes composed of highly tessellated surfaces.}
}

%################################
%### 2002
%################################

@TechReport{Gobbetti:2002:SAR,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2002:03",
  idxdatepub = "2002:03",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  title = {State of the Art Report on Technology for the efficient management of geo-spatial {3D} data on commodity and distributed platform},
  author = {Enrico Gobbetti and Riccardo Scateni and Roberto Scopigno},
  type = {Deliverable},
  number = {D2.2},
  institution = {EU Project V-PLANET (IST-2000-28095)},
  month = mar,
  year = 2002,
  url = {},
}

@TechReport{Cignoni:2002:TRT,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2002:03",
  idxdatepub = "2002:03",
  idxkey = {Terrain, TCR, MR},
  idxproject = {VPLANET},
  title = {Technical Report on time critical components},
  author = {Paolo Cignoni and Fabio Ganovelli and Enrico Gobbetti and Fabio Marton and Federico Ponchio and Roberto Scopigno},
  type = {Deliverable},
  number = {D5.1},
  institution = {EU Project V-PLANET (IST-2000-28095)},
  month = nov,
  year = 2002,
  url = {},
}

@TechReport{Gobbetti:2002:HHO,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2002:03",
  idxdatepub = "2002:03",
  idxkey = {Globillum},
  idxproject = {DIVERCITY},
  author = {Enrico Gobbetti and Leonardo Span\`o and Marco Agus},
  title = {Hierarchical Higher Order Face Cluster Radiosity},
  institution = inst-CRS4,
  address = inst-CRS4:adr,
  year = 2002,
  number = {CRS4 TR/},
  month = mar,
  url = {https://www.crs4.it/vic/data/papers/crs4-report-hhofcr-2002.pdf}
}
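% ----------------------------------------------------------------------
% The face cluster radiosity reports above (and the Eurographics 2003
% article earlier) build on a modified progressive shooting iteration.
% For context, the textbook flat-patch version with explicit form factors
% (Patch/shoot_once and F[i][j] are illustrative; the actual method is
% hierarchical and uses higher order bases): pick the patch with the most
% unshot power and distribute it.
%
%   #include <cstddef>
%   #include <vector>
%
%   struct Patch { float area, reflectance, radiosity, unshot; };
%
%   void shoot_once(std::vector<Patch>& p,
%                   const std::vector<std::vector<float>>& F) {
%       std::size_t s = 0;           // shooter: max unshot power (B * A)
%       for (std::size_t i = 1; i < p.size(); ++i)
%           if (p[i].unshot * p[i].area > p[s].unshot * p[s].area) s = i;
%       for (std::size_t j = 0; j < p.size(); ++j) {
%           if (j == s) continue;
%           float dB = p[j].reflectance * p[s].unshot * F[s][j]
%                      * p[s].area / p[j].area;
%           p[j].radiosity += dB;
%           p[j].unshot    += dB;
%       }
%       p[s].unshot = 0.0f;          // shooter's power fully distributed
%   }
% ----------------------------------------------------------------------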
@InProceedings{Agus:2002:SMP,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:09",
  idxdatepub = "2002:10",
  idxkey = {Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Un sistema multiprocessore per la simulazione della chirurgia sull'osso temporale},
  editor = {},
  booktitle = {Proceedings of the Second MIMOS Conference},
  publisher = {},
  address = {Conference held in Turin, Italy, October 28--29, 2002},
  pages = {},
  isbn = {},
  month = oct,
  year = {2002},
  abstract = {This article presents a simulator for training in temporal bone surgery. The system is based on volumetric models directly derived from {3D} CT and MR data. Real-time sensory feedback is provided to the user by means of volume rendering techniques and haptic sensation modeling. The performance constraints imposed by the human perceptual system are met by exploiting parallelism through the decoupling of the simulation on a multi-processor PC platform. The system components and the current state of their integration are described in detail.},
  url = {https://www.crs4.it/vic/data/papers/mimos-2002.pdf}
}

@InProceedings{Giachetti:2002:BDM,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:03",
  idxdatepub = "2002:07",
  idxkey = {Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Andrea Giachetti and Marco Agus and Enrico Gobbetti and Gianluigi Zanetti},
  title = {Blood, dust and mud for a temporal bone surgical simulator},
  editor = {},
  booktitle = {Proceedings of the 5th Conference of the European Society of Mathematical and Theoretical Biology, Poster sessions},
  publisher = {},
  address = {Conference held in Milan, Italy, July 2--6, 2002},
  pages = {},
  isbn = {},
  month = jul,
  year = {2002},
  abstract = {We describe the methods used for the simulation of fluids and dust applied in an immersive virtual reality simulation system providing the visual and tactile feedback of mastoid bone surgery. A particle system has been used to simulate water, blood, and dust; each particle has a label describing its material and a different physical behaviour. Water particles are introduced by an irrigator with an initial velocity directed along the irrigator axis, dust particles are generated by the burr performing the surgical bone drilling with an initial velocity depending on the rotation of the burr itself, and blood is generated by tissues with negligible initial speed. Particles then move according to Newton's law, but interact with each other and with bone and other tissues represented as a voxelized volume. To reach real-time performance, all the interactions, even particle-particle interactions, are represented as voxel-particle interactions.},
  url = {}
}

@InProceedings{Agus:2002:HSB,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2001:12",
  idxdatepub = "2002:05",
  idxkey = {Haptics, Surgical, Volren},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Haptic Simulation of Bone Dissection},
  editor = {},
  booktitle = {Abstract -- SIMAI 2002 Symposium on Methods and Applications in Advanced Computational Mechanics},
  publisher = {},
  address = {Conference held in Chia, CA, Italy, May 27--31, 2002},
  pages = {},
  isbn = {},
  month = may,
  year = {2002},
  abstract = {Bone dissection is an important component of many surgical procedures. We discuss a haptic implementation of a bone cutting burr, which is being developed as a component of a training system for temporal bone surgery. We use a physically motivated model to describe the burr-bone interaction process. The model includes haptic force evaluation, the bone erosion process, and the resulting debris. The current implementation, directly operating on a voxel discretization of patient-specific {3D} imaging data, is efficient enough to provide real-time feedback on a low-end multi-processing PC platform. This research is supported by the IERAPSI project (EU-IST-1999-12175), funded under the European IST programme (Information Society Technologies).},
  url = {https://www.crs4.it/vic/data/papers/simai2002-ierapsi.pdf},
}
@InProceedings{Spano:2002:RHT,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2002:02",
  idxdatepub = "2002:05",
  idxkey = {Globillum},
  idxproject = {DIVERCITY},
  author = {Leonardo Span\`o and Enrico Gobbetti},
  title = {Radiosity for Highly Tessellated Models},
  editor = {},
  booktitle = {SIMAI 2002 Symposium on Adaptive Techniques in Numerical Simulation and Data Processing},
  publisher = {},
  address = {Conference held in Chia, CA, Italy, May 27--31, 2002},
  pages = {},
  isbn = {},
  month = may,
  year = {2002},
  abstract = {The radiosity method is one of the methods of choice used in global illumination simulation. It is a finite element technique that is particularly well suited for computing the radiance distribution in an environment exhibiting only diffuse reflection and emission. We discuss a multiresolution implementation of the technique that has been developed to rapidly compute radiosity solutions for scenes composed of highly tessellated models. The application context is an interactive lighting design tool being developed in the framework of the DIVERCITY project (EU-IST-13365), funded under the European IST programme (Information Society Technologies).},
  url = {https://www.crs4.it/vic/data/papers/simai2002-divercity.pdf},
}

@TechReport{Agus:2002:ISS,
  idxstatus = "Techreport",
  idxmedium = "Text",
  idxdatesub = "2002:02",
  idxdatepub = "2002:02",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  title = {{IERAPSI} Surgical Simulator Software Kernel},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti},
  type = {Deliverable},
  number = {D4.1},
  institution = {EU Project IERAPSI (IST-1999-12175)},
  month = feb,
  year = 2002,
  url = {https://www.crs4.it/vic/data/papers/ierapsi-d41.pdf},
}

@Article{Agus:2002:MDS,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2001:08",
  idxdatepub = "2002:12",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {A multiprocessor decoupled system for the simulation of temporal bone surgery},
  journal = {Computing and Visualization in Science},
  issn = {1432-9360},
  publisher = pub-SV,
  address = pub-SV:adr,
  volume = {5},
  number = {1},
  pages = {35--43},
  month = {},
  year = {2002},
  abstract = {A training system for simulating temporal bone surgery is presented. The system is based on patient-specific volumetric object models derived from {3D} CT and MR imaging data. Real-time feedback is provided to the trainees via real-time volume rendering and haptic feedback. The performance constraints dictated by the human perceptual system are met by exploiting parallelism via a decoupled simulation approach on a multi-processor PC platform. In this paper, system components are detailed and the current state of the integrated system is presented.},
  keywords = {Surgical simulation, temporal bone dissection, decoupled simulation model, hardware volume rendering, haptic feedback},
  url = {https://www.crs4.it/vic/data/papers/cvs2002-ierapsi.pdf},
}
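% ----------------------------------------------------------------------
% The Agus:2002:MDS entry above meets perceptual rate constraints by
% decoupling the simulation across processors. A minimal two-thread sketch
% of that pattern: a fast haptic loop and a slower visual loop exchange
% only state snapshots (State, the rates, and the dummy update are
% illustrative; the real system runs on a multi-processor PC platform).
%
%   #include <atomic>
%   #include <chrono>
%   #include <mutex>
%   #include <thread>
%
%   struct State { double tool_x = 0, tool_y = 0, tool_z = 0; };
%
%   int main() {
%       State shared; std::mutex m; std::atomic<bool> run{true};
%       std::thread haptics([&] {              // ~1 kHz force loop
%           while (run) {
%               { std::lock_guard<std::mutex> l(m); shared.tool_x += 1e-4; }
%               std::this_thread::sleep_for(std::chrono::milliseconds(1));
%           }
%       });
%       std::thread visuals([&] {              // ~30 Hz rendering loop
%           while (run) {
%               State snap;
%               { std::lock_guard<std::mutex> l(m); snap = shared; }
%               (void)snap;                    // render from the snapshot
%               std::this_thread::sleep_for(std::chrono::milliseconds(33));
%           }
%       });
%       std::this_thread::sleep_for(std::chrono::seconds(1));
%       run = false; haptics.join(); visuals.join();
%   }
% ----------------------------------------------------------------------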
@InProceedings{Agus:2002:RTH,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2001:08",
  idxdatepub = "2002:02",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Antonio Zorcolo},
  title = {Real-time Haptic and Visual Simulation of Bone Dissection},
  editor = {},
  booktitle = {IEEE Virtual Reality Conference},
  publisher = pub-IEEE,
  address = {Conference held in Orlando, FL, USA, March 24--28},
  pages = {209--216},
  isbn = {},
  month = feb,
  year = 2002,
  abstract = {Bone dissection is an important component of many surgical procedures. In this paper, we discuss a haptic and visual implementation of a bone cutting burr, which is being developed as a component of a training system for temporal bone surgery. We use a physically motivated model to describe the burr-bone interaction, which includes haptic force evaluation, the bone erosion process, and the resulting debris. The current implementation, directly operating on a voxel discretization of patient-specific {3D} CT and MR imaging data, is efficient enough to provide real-time feedback on a low-end multi-processing PC platform.},
  url = {https://www.crs4.it/vic/data/papers/vr2002-burr.pdf},
}

@InCollection{Agus:2002:MSC,
  idxstatus = "Published",
  idxmedium = "Text",
  idxdatesub = "2001:07",
  idxdatepub = "2002:01",
  idxkey = {Haptics, Volren, Surgical},
  idxproject = {IERAPSI},
  author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Nigel W. John and Robert J. Stone},
  title = {Mastoidectomy Simulation with Combined Visual and Haptic Feedback},
  editor = {J. D. Westwood and H. M. Hoffmann and G. T. Mogel and D. Stredney},
  booktitle = {Medicine Meets Virtual Reality 2002},
  publisher = pub-IOS,
  address = pub-IOS:adr,
  pages = {17--23},
  isbn = {1-58603-203-8},
  month = jan,
  year = 2002,
  abstract = {Mastoidectomy is one of the most common surgical procedures relating to the petrous bone. In this paper we describe our preliminary results in the realization of a virtual reality mastoidectomy simulator. Our system is designed to work on patient-specific volumetric object models directly derived from {3D} CT and MRI images. The paper summarizes the detailed task analysis performed in order to define the system requirements, introduces the architecture of the prototype simulator, and discusses the initial feedback received from selected end users.},
  url = {https://www.crs4.it/vic/data/papers/mmvr2002-ierapsi.pdf},
}
@InCollection{Agus:2002:MSC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2001:07", idxdatepub = "2002:01", idxkey = {Haptics, Volren, Surgical}, idxproject = {IERAPSI}, author = {Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Nigel W. John and Robert J. Stone}, title = {Mastoidectomy Simulation with Combined Visual and Haptic Feedback}, editor = {J. D. Westwood and H. M. Hoffmann and G. T. Mogel and D. Stredney}, booktitle = {Medicine Meets Virtual Reality 2002}, publisher = pub-IOS, address = pub-IOS:adr, pages = {17--23}, isbn = {1-58603-203-8}, month = jan, year = 2002, abstract = { Mastoidectomy is one of the most common surgical procedures relating to the petrous bone. In this paper we describe our preliminary results in the realization of a virtual reality mastoidectomy simulator. Our system is designed to work on patient-specific volumetric object models directly derived from {3D} CT and MRI images. The paper summarizes the detailed task analysis performed in order to define the system requirements, introduces the architecture of the prototype simulator, and discusses the initial feedback received from selected end users.}, url = {https://www.crs4.it/vic/data/papers/mmvr2002-ierapsi.pdf}, } @InCollection{Gobbetti:2002:HHT, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2001:07", idxdatepub = "2002:02", idxkey = {Surgical}, idxproject = {LAPS}, author = {Enrico Gobbetti and Gianluigi Zanetti and Riccardo Scateni}, title = {Head and Hand Tracking Devices in Virtual Reality}, booktitle = {{3D} Image Processing: Techniques and Clinical Applications}, pages = {287--292}, publisher = pub-SV, address = pub-SV:adr, abstract = {This short introduction to head and hand tracking devices summarizes the characteristics of some of the technologies most relevant to medical applications and presents the configuration chosen in three representative test beds.}, month = feb, year = {2002}, isbn = {3-540-67470-5}, editor = {D. Caramella and C. Bartolozzi}, url = {https://www.crs4.it/vic/data/papers/hhtvr2001.pdf}, } @Article{Jackson:2002:DVR, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2000:05", idxdatepub = "2002:03", idxkey = {Haptics, Volren, Surgical}, idxproject = {IERAPSI}, author = {Alan Jackson and Nigel W. John and Neil A. Thacker and John E. Gillespie and Enrico Gobbetti and Gianluigi Zanetti and Robert Stone and Alf D. Linney and Ghassan H. Alusi and Stefano Sellari Franceschini and Armin Schwerdtner and Ad Emmen}, title = {Developing a virtual reality environment for petrous bone surgery: a state-of-the-art review}, journal = {Journal of Otology \& Neurotology}, publisher = {Lippincott Williams \& Wilkins}, address = {Baltimore, MD, USA}, volume = {23}, number = {2}, pages = {111--121}, month = mar, year = {2002}, abstract = {The increasing power of computers has led to the development of sophisticated systems that aim to immerse the user in a virtual environment. The benefits of this type of approach to the training of physicians and surgeons are immediately apparent. Unfortunately, the implementation of virtual reality (VR) surgical simulators has been restricted by both cost and technical limitations. The few successful systems use standardized scenarios, often derived from typical clinical data, to allow the rehearsal of procedures. In reality we would choose a system that allows us not only to practice typical cases but also to enter our own patient data and use it to define the virtual environment. In effect we want to re-write the scenario every time we use the environment and to ensure that its behavior exactly duplicates the behavior of the real tissue. If this can be achieved then VR systems can be used not only to train surgeons but also to rehearse individual procedures where variations in anatomy or pathology present specific surgical problems. The European Union has recently funded a multinational 3-year project (IERAPSI, Integrated Environment for Rehearsal and Planning of Surgical Interventions) to produce a virtual reality system for surgical training and for rehearsing individual procedures. Building the IERAPSI system will bring together a wide range of experts and combine the latest technologies to produce a true, patient-specific virtual reality surgical simulator for petrous/temporal bone procedures.
This article presents a review of the state-of-the-art technologies currently available to construct a system of this type and an overview of the functionality and specifications such a system requires.}, keywords = {}, url = {https://www.crs4.it/vic/data/papers/j-otology-2001.pdf} } %################################ %### 2001 %################################ @Article{Aspin:2001:CFM, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2001:08", idxdatepub = "2001:11", idxkey = {Globillum}, idxproject = {DIVERCITY}, author = {Rob Aspin and Laurent DaDalto and Terrence Fernando and Enrico Gobbetti and Mathieu Marache and Mark Shelbourn and Souheil Soubra}, title = {A conceptual framework for multi-modal interactive virtual workspaces}, journal = {Electronic Journal of Information Technology in Construction}, publisher = {Royal Institute of Technology}, address = {Stockholm, Sweden}, volume = 7, number = {}, pages = {149--159}, month = nov, year = 2001, abstract = { Construction projects involve a large number of both direct stakeholders (clients, professional teams, contractors, etc.) and indirect stakeholders (local authorities, residents, workers, etc.). Current methods of communicating building design information can lead to several types of difficulties (e.g. incomplete understanding of the planned construction, functional inefficiencies, inaccurate initial work or clashes between components, etc.). Integrated software solutions based on VR technologies can bring significant value improvement and cost reduction to the Construction Industry. The aim of this paper is to present the research carried out in the framework of the DIVERCITY project (Distributed Virtual Workspace for Enhancing Communication within the Construction Industry - IST project n°13365), funded under the European IST programme (Information Society Technologies). DIVERCITY's goal is to develop a Virtual Workspace that addresses the three key building construction phases: 1. Client briefing (with detailed interaction between client and architect); 2. Design Review (which requires detailed input from multidisciplinary teams - architects, engineers, facility managers, etc.); 3. Construction (aiming to fabricate or refurbish the building). Using a distributed architecture, the DIVERCITY system aims to support and enhance concurrent engineering practices for these three phases, thereby allowing teams based in different geographic locations to collaboratively design, test and validate shared virtual projects.
The global DIVERCITY project will be presented in terms of its objectives, and the software architecture will be detailed.}, keywords = {}, url = {https://www.crs4.it/vic/data/papers/itcon-2001.pdf}, } @TechReport{Dalto:2001:DSD, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2001:08", idxkey = {Globillum}, idxproject = {DIVERCITY}, title = {{DIVERCITY} System Design: Communication Services and Multiresolution Module}, author = {Laurent Da Dalto and Enrico Gobbetti}, type = {Deliverable}, number = {D16/17}, institution = {EU Project DIVERCITY (IST-1999-13365)}, month = aug, year = 2001 } @TechReport{Aspin:2001:DWA, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2001:08", idxkey = {Globillum}, idxproject = {DIVERCITY}, title = {{DIVERCITY} Workspace Alpha Phase: Client Briefing Workspace, Design Review Workspace, and Construction Workspace}, author = {Rob Aspin and Laurent DaDalto and Terrence Fernando and Enrico Gobbetti and Mathieu Marache and Mark Shelbourn and Souheil Soubra}, type = {Deliverable}, number = {D13/14/15}, institution = {EU Project DIVERCITY (IST-1999-13365)}, month = aug, year = 2001 } @InProceedings{Bettio:2001:IDR, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2001:04", idxdatepub = "2001:10", idxkey = {Surgical, Volren}, idxproject = {LAPS, BRAIN}, author = {Fabio Bettio and Sergio Demelio and Enrico Gobbetti and Giuseppe Luppino and Massimo Matelli}, title = {Interactive {3-D} Reconstruction and Visualization of Primates Cerebral Cortex}, editor = {}, booktitle = {Society for Neuroscience 31st Annual Meeting}, pages = {}, publisher = {}, series = {}, address = {Abstract -- Conference held in San Diego, CA, USA, November 10-15}, isbn = {}, month = oct, year = {2001}, abstract = { We have developed software for 3-D reconstruction and visualization of architectonic, neurophysiological and tract tracing data of primate cerebral cortex. Our emphasis is on providing interactive solutions to reconstruction and analysis problems by harnessing the power of new-generation commodity graphics accelerators. We take as input data acquired in serially collected individual sections, in which the locations of outer and inner cortical boundaries, architectonic borders, electrode tracks, and labeled neurons are coded in X-Y coordinates. Linear and non-linear local transformations are interactively applied to sections for aligning them and minimizing distortions introduced by sectioning and histological procedures. Cortical surfaces are incrementally reconstructed during section manipulation using both direct triangulation and functional techniques. Using multipass rendering techniques, the system interactively generates realistic 3-D views that present in the same image the location of architectonic areas and the spatial location, density and cortical depth of the introduced data. The reconstructed brain can be resliced according to arbitrary planes and virtually dissected for showing sulcal banks without distortion. Application of this software to the analysis of data from our previous studies showed that this integrated system is a powerful tool for anatomo-functional correlation in highly convoluted brains and for comparing data from brains cut along different sectioning planes.
The availability of low-cost, high-performance graphics PC platforms makes this approach practical for everyday laboratory work.}, url = {}, } @InCollection{Demelio:2001:TDR, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2000:12", idxdatepub = "2001:05", idxkey = {Surgical, Volren}, idxproject = {LAPS, BRAIN}, author = {Sergio Demelio and Fabio Bettio and Enrico Gobbetti and Giuseppe Luppino}, title = {Three-dimensional Reconstruction and Visualization of the Cerebral Cortex in Primates}, editor = {David Ebert and Jean Favre and Ronny Peikert}, booktitle = {Data Visualization 2001}, pages = {}, publisher = pub-SV, series = ser-EG, address = pub-SV:adr, note = {Proceedings of the Joint Eurographics and IEEE TCVG Symposium on Visualization, Ascona, Switzerland, May 28--30, 2001}, isbn = {}, month = may, year = {2001}, abstract = { We present a prototype interactive application for the direct analysis in three dimensions of the cerebral cortex in primates. The paper provides an overview of the current prototype system and presents the techniques used for reconstructing the cortex shape from data derived from histological sections as well as for rendering it at interactive rates. Results are evaluated by discussing the analysis of the right hemisphere of the brain of a macaque monkey used for neuroanatomical tract-tracing experiments.}, url = {https://www.crs4.it/vic/data/papers/vissym01-brain.pdf}, } @Article{Gobbetti:2001:EVP, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2000:10", idxdatepub = "2001:01", idxkey = {TCR}, idxproject = {CAVALCADE, DIVERCITY}, author = {Enrico Gobbetti and Riccardo Scateni}, title = {Exploring Virtual Prototypes using Time-Critical Rendering Techniques}, journal = {ERCIM News}, publisher = {ERCIM EEIG}, address = {B. P. 93, F-06902 Sophia-Antipolis Cedex, France}, volume = {}, number = {44}, pages = {46--47}, month = jan, year = 2001, abstract = { Scientists at CRS4, the Center for Advanced Studies, Research and Development in Sardinia, Cagliari, Italy, have developed a time-critical rendering algorithm that relies upon a scene description in which objects are represented as multiresolution meshes. In collaboration with other European partners, this technique has been applied to the visual and collaborative exploration of large digital mock-ups.}, keywords = {}, url = {https://www.crs4.it/vic/data/papers/ercim-2001.pdf}, } @Article{Bouvier:2001:TTO, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:11", idxdatepub = "2001:01", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Eric Bouvier and Enrico Gobbetti}, title = {{TOM} -- Totally Ordered Mesh: a Multiresolution Structure for Time-Critical Graphics Applications}, journal = {International Journal of Image and Graphics}, publisher = pub-WORLD-SCI, address = pub-WORLD-SCI:adr, volume = 1, number = 1, pages = {115--134}, month = jan, year = 2001, abstract = { Three-dimensional interactive applications are confronted with situations where very large databases have to be animated, transmitted and displayed in very short bounded times. As it is generally impossible to handle the complete graphics description while meeting timing constraints, techniques enabling the extraction and manipulation of a significant part of the geometric database have been the focus of much research in the field of computer graphics. Multiresolution representations of {3D} models provide access to {3D} objects at arbitrary resolutions while minimizing appearance degradation.
Several kinds of data structures have recently been proposed for dealing with polygonal or parametric representations, but were not generally optimized for time-critical applications. We describe the {TOM} (Totally Ordered Mesh), a multiresolution triangle mesh structure tailored to the support of time-critical adaptive rendering. The structure grants high-speed access to the continuous levels of detail of a mesh and allows very fast traversal of the list of triangles at arbitrary resolution so that bottlenecks in the graphics pipeline are avoided. Moreover, and without specific compression, the memory footprint of the {TOM} is small (about 108\% of the single-resolution object in face-vertex form) so that large scenes can be effectively handled. The {TOM} structure also supports storage of per-vertex (or per triangle corner) attributes such as colors, normals, texture coordinates or dynamic properties. Implementation details are presented along with the results of tests for memory needs, approximation quality, timing and efficacy.}, keywords = {}, url = {https://www.crs4.it/vic/data/papers/ijig00-tom.pdf} } @InCollection{Agus:2001:IES, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2000:07", idxdatepub = "2001:01", idxkey = {}, idxproject = {MIRRORS}, author = {Marco Agus and Fabio Bettio and Enrico Gobbetti and Luciano Fadiga}, title = {An Integrated Environment for Stereoscopic Acquisition, Off-line {3D} Elaboration, and Visual Presentation of Biological Actions}, editor = {J. D. Westwood and H. M. Hoffmann and G. T. Mogel and D. Stredney and R. A. Robb}, booktitle = {Medicine Meets Virtual Reality 2001 -- Inner Space, Outer Space, Virtual Space}, publisher = pub-IOS, address = pub-IOS:adr, pages = {23--29}, isbn = {}, month = jan, year = 2001, abstract = { We present an integrated environment for stereoscopic acquisition, off-line {3D} elaboration, and visual presentation of biological hand actions. The system is used in neurophysiological experiments aimed at the investigation of the parameters of the external stimuli that mirror neurons visually extract and match on their movement-related activity.}, url = {https://www.crs4.it/vic/data/papers/mmvr-2001-hfsp.pdf}, }
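%
% Illustrative sketch (not code from the entries above): the {TOM}
% structure of Bouvier:2001:TTO, two entries up, keeps the triangles
% of a mesh in a single total order, so that a prefix of the triangle
% list is a renderable approximation and level-of-detail extraction
% is essentially free. A minimal Python sketch of the prefix idea
% only; the actual structure orders triangles by a vertex-collapse
% sequence, remaps vertices per resolution, and stores per-vertex and
% per-corner attributes:
%
%   class TOM:
%       """Totally ordered triangle list: coarsest triangles first."""
%       def __init__(self, vertices, ordered_triangles):
%           self.vertices = vertices             # [(x, y, z), ...]
%           self.triangles = ordered_triangles   # vertex index triples
%
%       def at_resolution(self, n):
%           # Any prefix is a valid approximation of the mesh, so
%           # traversal at a given resolution is a simple slice.
%           return self.triangles[:min(n, len(self.triangles))]
%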
@InProceedings{John:2001:ISS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "2000:07", idxdatepub = "2001:01", idxkey = {Haptics, Volren, Surgical}, idxproject = {IERAPSI}, author = {Nigel W. John and Neil Thacker and Maja Pokric and Alan Jackson and Gianluigi Zanetti and Enrico Gobbetti and Andrea Giachetti and Robert Stone and Joao Campos and Ad Emmen and Armin Schwerdtner and Emanuele Neri and Stefano Sellari Franceschini and Frederic Rubio}, title = {An Integrated Simulator for Surgery of the Petrous Bone}, editor = {J. D. Westwood}, booktitle = {Medicine Meets Virtual Reality 2001}, publisher = pub-IOS, address = pub-IOS:adr, isbn = {}, month = jan, year = 2001, abstract = { This paper describes work being undertaken as part of the IERAPSI (Integrated Environment for the Rehearsal and Planning of Surgical Intervention) project. The project is focussing on surgery for the petrous bone, and brings together a consortium of European clinicians and technology providers working in this field. The paper presents the results of a comprehensive user task analysis that has been carried out in the first phase of the IERAPSI project, and details the current status of development of a preoperative planning environment and a physically-based surgical simulator.}, url = {https://www.crs4.it/vic/data/papers/mmvr-2001-ierapsi.pdf}, } %################################ %### 2000 %################################ @InProceedings{Soubra:2000:VID, idxproject = {DIVERCITY}, author = {Souheil Soubra and Florent Coudret and J\'er\^ome Duchon and Enrico Gobbetti}, title = {Virtual Integrated Design and Construction}, booktitle = {Construction Information Technology}, year = 2000, pages = 884, abstract = { Recent surveys in the construction industry (e.g. Egan Report [1]) show that significant value improvement and cost reduction can be gained by substantially integrated solutions being applied by project teams as a means of reengineering the project process. In particular, these surveys show that there are considerable benefits to be gained by integration of the “design” and “construction” processes. After a brief presentation of potential applications of Virtual Reality (VR) in the construction industry, the paper will focus on the issue of “collaborative virtual prototyping”. In particular, the research carried out in the framework of the CAVALCADE project, funded under the European ESPRIT programme (HPCN domain), will be presented. Special emphasis will be put on the concurrent engineering features of CAVALCADE allowing project teams, based in geographically distant locations, to collaboratively design, test and validate shared virtual prototypes. As a conclusion, the objectives of the new IST project (DIVERCITY - Distributed Virtual Workspace for enhancing Communication within the Construction Industry), which will build, among other things, on the results of CAVALCADE, will then be presented. }, url = {https://www.crs4.it/vic/data/papers/cit2000-divercity.pdf}, ISBN = {9979-9174-3-1} } @TechReport{Aspin:2000:DSD, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2000:09", idxkey = {Globillum}, idxproject = {DIVERCITY}, title = {{DIVERCITY} Software Design: Client Briefing Workspace, Design Review Workspace, and Construction Workspace}, author = {Rob Aspin and Laurent DaDalto and Terrence Fernando and Enrico Gobbetti and Mathieu Marache and Mark Shelbourn and Souheil Soubra}, type = {Deliverable}, number = {D8/9/10}, institution = {EU Project DIVERCITY (IST-1999-13365)}, month = sep, year = 2000 } @TechReport{Aspin:2000:DSA, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2000:09", idxkey = {Globillum}, idxproject = {DIVERCITY}, title = {{DIVERCITY} System Architecture Definition: H/W and S/W Options}, author = {Rob Aspin and Laurent Da Dalto and Terrence Fernando and Enrico Gobbetti and Mathieu Marache and Mark Shelbourn and Souheil Soubra}, type = {Deliverable}, number = {D4}, institution = {EU Project DIVERCITY (IST-1999-13365)}, month = sep, year = 2000 } @TechReport{John:2000:ISP, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2000:07", idxkey = {Haptics, Volren, Surgical}, idxproject = {IERAPSI}, title = {{IERAPSI} Surgical Procedures and Implementation Specification}, author = {Nigel W. John and Neil A. Thacker and Maja Pokric and Marco Agus and Andrea Giachetti and Enrico Gobbetti and Gianluigi Zanetti and Robert J.
Stone and Manfred Kummer and Frederic Rubio}, type = {Deliverable}, number = {D2}, institution = {EU Project IERAPSI (IST-1999-12175)}, month = jul, year = 2000 } @TechReport{Demelio:2000:RVC, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "2000:09", idxdatepub = "2000:09", idxkey = {Volren, Surgical}, idxproject = {LAPS}, author = {Sergio Demelio and Enrico Gobbetti}, title = {Ricostruzione e Visualizzazione {3D} di un Cervello da Acquisizioni Manuali di Sezioni Istologiche}, institution = inst-CRS4, address = inst-CRS4:adr, year = 2000, number = {CRS4 TR/}, month = sep, url = {https://www.crs4.it/vic/data/papers/crs4-tr-09-2000.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @TechReport{Agus:2000:CPR, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "2000:06", idxdatepub = "2000:06", idxkey = {}, idxproject = {MIRRORS}, author = {Marco Agus and Fabio Bettio and Enrico Gobbetti}, title = {Creating and Presenting Real and Artificial Visual Stimuli for the Neurophysiological Investigation of the Observation/Execution Matching System}, abstract = { Recent neurophysiological experiments have shown that the visual stimuli that trigger a particular kind of neuron located in the ventral premotor cortex of monkeys and humans are very selective. These \textit{mirror neurons} are activated when the hand of another individual interacts with an object but are not activated when the actions, identical in purpose, are made by manipulated mechanical tools. A Human Frontiers Science Program project is investigating which parameters of the external stimuli mirror neurons visually extract and match on their movement-related activity. The planned neurophysiological experiments will require the presentation of digital stimuli of different kinds, including video sequences showing meaningful actions made by human hands, synthetic reproductions of the same actions made by realistic virtual hands, as well as variations of the same actions by controlled modifications of hand geometry and/or action kinematics. This paper presents the specialized animation system we have developed for the project.}, institution = inst-CRS4, address = inst-CRS4:adr, number = {00/}, year = {2000}, month = jun, url = {https://www.crs4.it/vic/data/papers/hfsp-report-2000-06.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @Article{Torguet:2000:CSC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:11", idxdatepub = "2000:06", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Patrice Torguet and Olivier Balet and Enrico Gobbetti and Jean-Pierre Jessel and J\'er\^ome Duchon and Eric Bouvier}, title = {{CAVALCADE}: A system for Collaborative Prototyping}, journal = {International Journal of Design and Innovation Research}, publisher = {}, address = {}, volume = 2, number = 1, pages = {76--89}, month = {}, year = 2000, abstract = { Prototype design and testing is an indispensable stage of any project development in many fields of activity, such as the aeronautical, space, and automotive industries or architecture. Scientists and engineers rely on prototyping for a visual confirmation and validation of both their ideas and concepts. Using computers for designing digital prototypes is not a new idea, since CAD applications are nowadays widely used. In this paper we present how recent advances in the {3D} interaction and real-time visualisation research domains have led to the development of a collaborative and truly interactive system for virtual prototyping.
This work is supported by the European Community through the ESPRIT programme.}, keywords = {}, note = {Revised version of Laval'99 conference paper}, MISSINGurl = {https://www.crs4.it/vic/data/papers/???} } @Article{Montani:2000:DIC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:11", idxdatepub = "2000:02", idxkey = {SCIVIZ}, idxproject = {}, author = {Claudio Montani and Riccardo Scateni and Roberto Scopigno}, title = {Decreasing Isosurface Complexity via Discrete Fitting}, journal = {Journal of Computer-Aided Geometric Design}, publisher = pub-ELS, address = pub-ELS:adr, volume = 17, number = 3, pages = {207--232}, month = feb, year = 2000, abstract = {}, keywords = {}, url = {}, } @Article{Gobbetti:2000:TCM, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:11", idxdatepub = "2000:11", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Enrico Gobbetti and Eric Bouvier}, title = {Time-Critical Multiresolution Rendering of Large Complex Models}, journal = {Journal of Computer-Aided Design}, publisher = pub-ELS, address = pub-ELS:adr, volume = 32, number = 13, pages = {785--803}, month = nov, year = 2000, abstract = { Very large and geometrically complex scenes, exceeding millions of polygons and hundreds of objects, arise naturally in many areas of interactive computer graphics. Time-critical rendering of such scenes requires the ability to trade visual quality with speed. Previous work has shown that this can be done by representing individual scene components as multiresolution triangle meshes, and performing at each frame a convex constrained optimization to choose the mesh resolutions that maximize image quality while meeting timing constraints. In this paper we demonstrate that the nonlinear optimization problem with linear constraints associated with a large class of quality estimation heuristics is efficiently solved using an active-set strategy. By exploiting the problem structure, Lagrange multiplier estimates and equality constrained problem solutions are computed in linear time. Results show that our algorithms and data structures provide low memory overhead, smooth level-of-detail control, and guarantee, within acceptable limits, a uniform, bounded frame rate even for widely changing viewing conditions. Implementation details are presented along with the results of tests for memory needs, algorithm timing, and efficacy.}, keywords = {multiresolution modeling, level-of-detail, adaptive rendering, numerical optimization, time-critical graphics}, url = {https://www.crs4.it/vic/data/papers/jcad00-tcr.pdf} }
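%
% Illustrative sketch (not the authors' algorithm): Gobbetti:2000:TCM
% chooses, at every frame, a resolution for each visible object so
% that predicted image quality is maximized under a rendering time
% budget, solving the constrained problem with an active-set strategy.
% The greedy stand-in below captures the budgeted-allocation idea only
% (it is optimal when per-object benefit functions are concave); names
% and parameters are hypothetical:
%
%   import heapq
%
%   def choose_resolutions(objects, budget, step=64):
%       """objects: list of (benefit, max_tris), where benefit(t) is
%       the estimated image quality at t triangles. Returns a triangle
%       allocation per object within the global budget."""
%       alloc = [0] * len(objects)
%       heap = []
%       for i, (benefit, max_tris) in enumerate(objects):
%           if step <= max_tris:
%               heapq.heappush(heap, (benefit(0) - benefit(step), i))
%       used = 0
%       while heap and used + step <= budget:
%           _, i = heapq.heappop(heap)    # largest marginal gain
%           benefit, max_tris = objects[i]
%           alloc[i] += step
%           used += step
%           if alloc[i] + step <= max_tris:
%               heapq.heappush(heap,
%                   (benefit(alloc[i]) - benefit(alloc[i] + step), i))
%       return alloc
%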
@InCollection{Zorcolo:2000:VVE, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:12", idxdatepub = "2000:06", idxkey = {Volren, Surgical, Haptics}, idxproject = {VIVA}, author = {Antonio Zorcolo and Enrico Gobbetti and Gianluigi Zanetti and Massimiliano Tuveri}, title = {A Volumetric Virtual Environment for Catheter Insertion Simulation}, booktitle = {Virtual Environments 2000}, editor = {Robert van Liere and Juriaan Mulder}, publisher = pub-SV, series = ser-LNCS, month = jun, year = {2000}, abstract = { We present an experimental catheter insertion simulation system that provides users with co-registered haptic and head-tracked stereoscopic visual feedback. The system works on patient-specific volumetric data acquired using standard medical imaging modalities. The actual needle insertion operation is simulated for individual patients, rather than being an example of a model surgical procedure on standard anatomy. Patient-specific features may thus be studied in detail by the trainees, overcoming one of the major limitations of current training techniques.}, url = {https://www.crs4.it/vic/data/papers/egve00-catheter.pdf} } @InCollection{Zorcolo:2000:CIS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:09", idxdatepub = "2000:01", idxkey = {Volren, Surgical, Haptics}, idxproject = {VIVA}, author = {Antonio Zorcolo and Enrico Gobbetti and Gianluigi Zanetti and Massimiliano Tuveri}, editor = {J. D. Westwood and H. M. Hoffman and G. T. Mogel and R. A. Robb and D. Stredney}, title = {Catheter insertion simulation with co-registered direct volume rendering and haptic feedback}, booktitle = {Medicine Meets Virtual Reality 2000 -- Envisioning Healing: Interactive Technology and the Patient-Practitioner Dialogue}, pages = {96--98}, publisher = pub-IOS, address = pub-IOS:adr, isbn = {1-58603-014-0}, month = jan, year = {2000}, abstract = { We have developed an experimental catheter insertion simulation system supporting head-tracked stereoscopic viewing of volumetric anatomic reconstructions registered with direct haptic {3D} interaction. The system takes as input data acquired with standard medical imaging modalities and regards it as a visual and haptic environment whose parameters are interactively defined using look-up tables. The system's display, positioned like a surgical table, provides a realistic impression of looking down at the patient. Head motion is measured via a six degree-of-freedom tracker, so that good positions for observing the anatomy and identifying the catheter insertion point are quickly established with simple head movements. By generating appropriate stereoscopic images and co-registering physical and virtual spaces beforehand, volumes appear at fixed physical positions and it is possible to control catheter insertion via direct interaction with a PHANToM haptic device. During the insertion procedure, the system provides perception of the effort of penetration and deviation inside the traversed tissues.
Semi-transparent volumetric rendering augments the sensory feedback with the visual indication of the inserted catheter position inside the body.}, url = {https://www.crs4.it/vic/data/papers/mmvr00-catheter.pdf}, } @TechReport{Duchon:2000:CFR, idxstatus = "Techreport", idxmedium = "Text", idxdatepub = "2000:02", idxkey = {TCR}, idxproject = {CAVALCADE}, title = {{CAVALCADE} Final Report}, author = {J\'er\^ome Duchon and Jacques Coves and Enrico Gobbetti and Riccardo Scateni and Luciano Marenzi and Jean-Pierre Jessel and Souheil Soubra and Joop De Kruyf and Philippe David and Carlos Cosials Ruiz}, type = {Deliverable}, number = {D1C}, institution = {EU Project CAVALCADE (ESPRIT-26285)}, month = feb, year = 2000 } %################################ %### 1999 %################################ @TechReport{Gobbetti:1999:EVP, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1999:11", idxdatepub = "1999:11", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Enrico Gobbetti and Riccardo Scateni and Marco Agus}, title = {Exploring Virtual Prototypes using Time-critical Rendering}, institution = inst-CRS4, address = inst-CRS4:adr, year = 1999, number = {CRS4 TR/}, month = nov, url = {https://www.crs4.it/vic/data/papers/crs4-tr-11-1999.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @InProceedings{Gobbetti:1999:TCM, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:03", idxdatepub = "1999:10", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Enrico Gobbetti and Eric Bouvier}, title = {Time-Critical Multiresolution Scene Rendering}, booktitle = {Proceedings IEEE Visualization}, publisher = pub-IEEE, pages = {123--130}, month = oct, year = {1999}, editor = {}, address = {Conference held in San Francisco, CA, USA}, abstract = {We describe a framework for time-critical rendering of graphics scenes composed of a large number of objects having complex geometric descriptions. Our technique relies upon a scene description in which objects are represented as multiresolution meshes. We perform a constrained optimization at each frame to choose the resolution of each potentially visible object that generates the best quality image while meeting timing constraints. The technique provides smooth level-of-detail control and aims at guaranteeing a uniform, bounded frame rate even for widely changing viewing conditions. The optimization algorithm is independent of the particular data structure used to represent multiresolution meshes. The only requirements are the ability to represent a mesh with an arbitrary number of triangles and to traverse a mesh structure at an arbitrary resolution in a short predictable time. A data structure satisfying these criteria is described and experimental results are discussed.}, keywords = {Multiresolution modeling, level of detail, adaptive rendering, time-critical graphics}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ieeeviz99-tcmrsr.pdf}, } @InProceedings{Torguet:1999:CSC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:01", idxdatepub = "1999:06", idxkey = {TCR}, idxproject = {CAVALCADE}, author = {Patrice Torguet and Olivier Balet and Enrico Gobbetti and Jean-Pierre Jessel and J\'er\^ome Duchon and Eric Bouvier}, title = {{CAVALCADE}: A system for Collaborative Prototyping}, editor = {G\'erard Subsol}, booktitle = {Proc.
International Scientific Workshop on Virtual Reality and Prototyping}, abstract = { Prototype design and testing is an indispensable stage of any project development in many fields of activity, such as the aeronautical, space, and automotive industries or architecture. Scientists and engineers rely on prototyping for a visual confirmation and validation of both their ideas and concepts. Using computers for designing digital prototypes is not a new idea, since CAD applications are nowadays widely used. In this paper we present how recent advances in the {3D} interaction and real-time visualisation research domains have led to the development of a collaborative and truly interactive system for virtual prototyping. This work is supported by the European Community through the ESPRIT programme.}, pages = {161--170}, year = 1999, month = jun, isbn = {2-9513952-0-5}, note = {Conference held in Laval, France, June 3--4}, url = {https://www.crs4.it/vic/data/papers/laval99-cavalcade.pdf}, } @InProceedings{Zorcolo:1999:CIS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1999:02", idxdatepub = "1999:05", idxkey = {Haptics, Volren, Surgical}, idxproject = {VIVA}, author = {Antonio Zorcolo and Enrico Gobbetti and Piero Pili and Massimiliano Tuveri}, title = {Catheter insertion simulation with combined visual and haptic feedback}, booktitle = {Proc. First PHANToM Users Research Symposium (PURS'99)}, year = 1999, month = may, note = {Conference held in Heidelberg, Germany, May 21-22}, abstract = { We have developed an experimental catheter insertion system supporting head-tracked stereoscopic viewing of volumetric reconstruction registered with direct haptic {3D} interaction. The system takes as input patient data acquired with standard medical imaging modalities and regards it as a visual and haptic environment whose parameters are defined using look-up tables. By means of a mirror, the screen appears to be positioned like a surgical table, providing the impression of looking down at the patient in a natural way. Co-registering physical and virtual spaces beforehand means that the patient appears at a fixed physical position on the surgical table and inside the workspace of the PHANToM device which controls catheter insertion. During the insertion procedure the system provides perception of the force of penetration and positional deviation of the inserted catheter.}, url = {https://www.crs4.it/vic/data/papers/purs99-needle.pdf}, } @InCollection{Gobbetti:1999:TDG, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:09", idxdatepub = "1999:02", idxkey = {}, idxproject = {}, author = {Enrico Gobbetti and Riccardo Scateni}, title = {Three-Dimensional Graphics}, booktitle = {Encyclopedia of Electrical and Electronics Engineering}, publisher = pub-WILEY, address = pub-WILEY:adr, abstract = { Three-dimensional graphics is the area of computer graphics that deals with producing two-dimensional representations, or images, of three-dimensional synthetic scenes, as seen from a given viewing configuration. The level of sophistication of these images may vary from simple wire-frame representations, where objects are depicted as a set of line segments, with no data on surfaces and volumes, to photorealistic rendering, where illumination effects are computed using the physical laws of light propagation. All the different approaches are based on the metaphor of a virtual camera positioned in {3D} space and looking at the scene.
Hence, independently of the rendering algorithm used, producing an image of the scene always requires solving the following problems: 1. Modeling geometric relationships among scene objects, and in particular efficiently representing the position and orientation in {3D} space of objects and virtual cameras; 2. Culling and clipping, i.e. efficiently determining which objects are visible from the virtual camera; 3. Projecting visible objects on the film plane of the virtual camera in order to render them. This chapter provides an introduction to the field by presenting the standard approaches for solving the aforementioned problems.}, month = feb, year = {1999}, editor = {John G. Webster}, volume = {22}, pages = {168--172}, url = {https://www.crs4.it/vic/data/papers/wiley-eeee.pdf}, } @Article{Turner:1999:MOO, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:10", idxdatepub = "1999:06", idxkey = {3DGUI, DESIGN}, idxproject = {METIS}, author = {Russell Turner and Li Song and Enrico Gobbetti}, title = {Metis: An Object-Oriented Toolkit for Constructing Virtual Reality Applications}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, year = {1999}, month = jun, volume = {18}, number = {2}, pages = {121--131}, abstract = {Virtual reality systems provide realistic look and feel by seamlessly integrating three-dimensional input and output devices. One software architecture approach to constructing such systems is to distribute the application between a computation-intensive simulator back-end and a graphics-intensive viewer front-end which implements user interaction. In this paper we discuss Metis, a toolkit we have been developing based on such a software architecture, which can be used for building interactive immersive virtual reality systems with computationally intensive components. The Metis toolkit defines an application programming interface on the simulator side, which communicates via a network with a standalone viewer program that handles all immersive display and interactivity. Network bandwidth and interaction latency are minimized by the use of a constraint network on the viewer side that declaratively defines much of the dynamic and interactive behavior of the application.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/cgf99-metis.pdf} }
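%
% Illustrative sketch (not the Metis API): Turner:1999:MOO minimizes
% network traffic by letting a constraint network on the viewer side
% declaratively define much of the application's dynamic behavior. A
% toy one-way dataflow constraint network in Python, with hypothetical
% names, assuming an acyclic dependency graph:
%
%   class Node:
%       """A value cell; constraints re-fire when it changes."""
%       def __init__(self, value=None):
%           self.value = value
%           self.dependents = []
%
%   class Constraint:
%       """Recomputes `output` from `inputs` via `fn`."""
%       def __init__(self, inputs, output, fn):
%           self.inputs, self.output, self.fn = inputs, output, fn
%           for n in inputs:
%               n.dependents.append(self)
%           self.evaluate()
%       def evaluate(self):
%           self.output.value = self.fn(*[n.value for n in self.inputs])
%
%   def set_value(node, value):
%       node.value = value
%       for c in node.dependents:     # propagate through the network
%           c.evaluate()
%           set_value(c.output, c.output.value)
%
%   # e.g., the tracked head position drives the camera declaratively:
%   head = Node((0.0, 0.0, 0.0))
%   eye = Node()
%   Constraint([head], eye, lambda p: (p[0], p[1], p[2] + 0.1))
%   set_value(head, (0.2, 0.0, 0.0))  # eye updates, no callbacks
%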
@InProceedings{Lecca:1999:MAK, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:01", idxdatepub = "1999:05", idxkey = {SCIVIZ}, idxproject = {}, author = "Giuditta Lecca and Ihsen Khlaifi and Erminia Leonardi and Fabio Bettio and Laura Muscas and J. Tarhouni and Claudio Paniconi", editor = "W. De Breuck and L. Walschot", title = {A modular approach to the Korba aquifer seawater intrusion study, 2, Simulation, data manipulation, and visualization for the 3-D model}, booktitle = "Proc. of the 15th Salt Water Intrusion Meeting ({SWIM})", volume = "79", pages = "62--68", publisher = "Flemish Journal of Natural Science", address = "Ghent, Belgium", year = "1999", url = {https://www.crs4.it/vic/data/papers/swim2.pdf} } %################################ %### 1998 %################################ @InCollection{Gobbetti:1998:VRP, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:08", idxdatepub = "1998:10", idxkey = {Haptics, Surgical}, idxproject = {VREPAR2}, author = {Enrico Gobbetti and Riccardo Scateni}, title = {Virtual Reality: Past, Present, and Future}, abstract = {This report provides a short survey of the field of virtual reality, highlighting application domains, technological requirements, and currently available solutions. The report is organized as follows: section 1 presents the background and motivation of virtual environment research and identifies typical application domains, section 2 discusses the characteristics a virtual reality system must have in order to exploit the perceptual and spatial skills of users, section 3 surveys current input/output devices for virtual reality, section 4 surveys current software approaches to support the creation of virtual reality systems, and section 5 summarizes the report.}, booktitle = {Virtual Environments in Clinical Psychology and Neuroscience: Methods and Techniques in Advanced Patient-Therapist Interaction}, publisher = pub-IOS, address = pub-IOS:adr, month = nov, year = {1998}, editor = {G. Riva and B. K. Wiederhold and E. Molinari}, pages = {3--20}, url = {https://www.crs4.it/vic/data/papers/vr-report98.pdf}, } @Article{Abdoulaev:1998:VVV, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:08", idxdatepub = "1998:12", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Gassan Abdoulaev and Sandro Cadeddu and Giovanni Delussu and Marco Donizelli and Luca Formaggia and Andrea Giachetti and Enrico Gobbetti and Andrea Leone and Cristina Manzi and Piero Pili and Alan Scheinine and Massimiliano Tuveri and Alberto Varone and Alessandro Veneziani and Gianluigi Zanetti and Antonio Zorcolo}, title = {{ViVa}: The Virtual Vascular Project}, journal = {IEEE Transactions on Information Technology in Biomedicine}, publisher = pub-IEEE, year = {1998}, month = dec, volume = {2}, number = {4}, pages = {268--274}, abstract = {The aim of the {ViVa} project is to develop tools for the modern hemodynamicist and cardiovascular surgeon to study and interpret the constantly increasing amount of information being produced by non-invasive imaging equipment. In particular, we are developing a system that will be able to process and visualize {3D} medical data, to reconstruct the geometry of arteries of specific patients and to simulate blood flow in them. The initial applications of the system will be for clinical research and training purposes. In a later stage we will explore the application of the system to surgical planning.
{ViVa} is based on an integrated set of tools, each dedicated to a specific aspect of the data processing and simulation pipeline: image processing and segmentation; real-time {3D} volume visualization; {3D} geometry reconstruction; {3D} mesh generation; blood flow simulation and visualization.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ieee-titb98.pdf}, } @InProceedings{Gobbetti:1998:IVA, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:03", idxdatepub = "1998:10", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Enrico Gobbetti and Piero Pili and Antonio Zorcolo and Massimiliano Tuveri}, title = {Interactive Virtual Angioscopy}, booktitle = {Proceedings IEEE Visualization}, publisher = pub-IEEE, pages = {435--438}, month = oct, year = {1998}, editor = {}, address = {Conference held in Research Triangle Park, NC, USA}, abstract = {Virtual angioscopy is a non-invasive medical procedure for exploring parts of the human vascular system. We have developed an interactive tool that takes as input data acquired with standard medical imaging modalities and regards it as a virtual environment to be interactively inspected. The system supports real-time navigation with stereoscopic direct volume rendering and dynamic endoscopic camera control, interactive tissue classification, and interactive point picking for morphological feature measurement. In this paper, we provide an overview of the system, discuss the techniques used in our prototype, and present experimental results on human data sets. The accompanying video-tape illustrates our approach with interactive sequences showing the examination of a human carotid artery.}, keywords = {Virtual angioscopy, endoscopy, interactive rendering, volume rendering, virtual environment}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ieeeviz98.pdf}, } @InProceedings{Leone:1998:DFEa, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:06", idxdatepub = "1998:09", idxkey = {SCIVIZ}, idxproject = {MIGAVIS}, author = {Andrea O. Leone and Paola Marzano and Enrico Gobbetti and Riccardo Scateni and Sergio Pedinotti}, title = {Discontinuous Finite Element Visualization}, booktitle = {Proceedings 8th International Symposium on Flow Visualization}, pages = {}, month = sep, year = {1998}, editor = {}, address = {Conference held in Sorrento, Italy}, abstract = {The aim of this work is the study and the implementation of appropriate visualization techniques for high-order discontinuous finite element data in two and three dimensions. In particular, we are dealing with field discontinuity and deformed cells. Such data are produced for example by chemical simulations, by fluid dynamics simulations, or, in general, anywhere high accuracy in boundary domain description is required.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/fv98.pdf}, } @Article{Turner:1998:ICA, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:10", idxdatepub = "1998:06", idxkey = {3DGUI, MODELING, ANIMATION}, idxproject = {LEMAN}, author = {Russell Turner and Enrico Gobbetti}, title = {Interactive Construction and Animation of Layered Elastically Deformable Characters}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, year = 1998, month = jun, volume = 17, number = 2, pages = {135--152}, abstract = {An interactive system is described for creating and animating deformable 3D characters.
By using a hybrid layered model of kinematic and physics-based components together with an immersive 3D direct manipulation interface, it is possible to quickly construct characters that deform naturally when animated and whose behavior can be controlled interactively using intuitive parameters. In this layered construction technique, called the elastic surface layer model, a simulated elastically deformable skin surface is wrapped around a kinematic articulated figure. Unlike previous layered models, the skin is free to slide along the underlying surface layers, constrained by geometric constraints which push the surface out and spring forces which pull the surface in toward the underlying layers. By tuning the parameters of the physics-based model, a variety of surface shapes and behaviors can be obtained, such as more realistic-looking skin deformation at the joints, skin sliding over muscles, and dynamic effects such as squash-and-stretch and follow-through. Since the elastic model derives all of its input forces from the underlying articulated figure, the animator may specify all of the physical properties of the character once, during the initial character design process, after which a complete animation sequence can be created using a traditional skeleton animation technique. Character construction and animation are done using a 3D user interface based on two-handed manipulation registered with head-tracked stereo viewing. In our configuration, a six degree-of-freedom head-tracker and CrystalEyes shutter glasses are used to display stereo images on a workstation monitor that dynamically follow the user's head motion. 3D virtual objects can be made to appear at a fixed location in physical space which the user may view from different angles by moving his head. To construct 3D animated characters, the user interacts with the simulated environment using both hands simultaneously: the left hand, controlling a Spaceball, is used for 3D navigation and object movement, while the right hand, holding a 3D mouse, is used to manipulate, through a virtual tool metaphor, the objects appearing in front of the screen. Hand-eye coordination is made possible by registering virtual space to physical space, allowing a variety of complex 3D tasks necessary for constructing 3D animated characters to be performed more easily and more rapidly than is possible using traditional interactive techniques.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/cgf98-esl.pdf}, } @InCollection{Leone:1998:DFEc, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:12", idxdatepub = "1998:01", idxkey = {SCIVIZ}, idxproject = {MIGAVIS}, author = {Andrea O. Leone and Paola Marzano and Enrico Gobbetti}, title = {Discontinuous Finite Element Visualization}, booktitle = {CRS4 Bulletin 1998}, publisher = inst-CRS4, address = inst-CRS4:adr, year = {1998}, editor = {}, abstract = {The aim of this research line is the study and implementation of appropriate visualization techniques for finite element data in two and three dimensions. In particular, we are dealing with unusual situations such as field discontinuity and deformed cells.
Such data is produced for example by chemical simulations, by fluid dynamics simulations, or, in general, anywhere high accuracy in boundary domain description is required.}, pages = {}, url = {}, } @InCollection{Gobbetti:1998:IDV, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:12", idxdatepub = "1998:01", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Enrico Gobbetti and Antonio Zorcolo}, title = {Interactive Direct Volume Rendering}, booktitle = {CRS4 Bulletin 1998}, publisher = inst-CRS4, address = inst-CRS4:adr, year = {1998}, editor = {}, abstract = {The aim of this research line is the creation of a virtual environment for the direct analysis in three dimensions of volumetric data. To this end, we study volume rendering techniques that can be used in a time-critical setting and explore the design space of direct {3D} interaction techniques for interacting with volumetric datasets. Our main application domain is currently medical data processing and simulation. We are developing a volume visualization system supporting head-tracked stereoscopic viewing registered with direct {3D} interaction and a virtual endoscopy system merging the concepts of interactive direct volume rendering and endoscopy in a tool for non-invasive analysis and visualization.}, pages = {}, url = {}, } %################################ %### 1997 %################################ @TechReport{Zorcolo:1997:TVV, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:11", idxdatepub = "1997:11", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Antonio Zorcolo and Piero Pili and Enrico Gobbetti}, title = {{TVR}: Un Visualizzatore Volumetrico ad Alte Prestazioni basato su {OpenGL 1.0}}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/}, year = {1997}, month = nov, url = {https://www.crs4.it/vic/data/papers/crs4-tr-97-tvr2d.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @TechReport{Leone:1997:CSB, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:10", idxdatepub = "1997:10", idxkey = {Sciviz}, idxproject = {CFD2}, author = {Andrea O. Leone and Enrico Gobbetti}, title = {A Comparative Study on Binary Scientific Data Formats}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/67}, month = oct, year = {1997}, abstract = {In this technical report a set of binary scientific data formats is examined.}, url = {https://www.crs4.it/vic/data/papers/crs4-tr-97-67.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @TechReport{Leone:1997:CSM, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:10", idxdatepub = "1997:10", idxkey = {SCIVIZ}, idxproject = {CFD2}, author = {Andrea O. Leone and Enrico Gobbetti}, title = {A Comparative Study on Modular Visualization Environments}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/65}, month = oct, year = {1997}, abstract = {Modular Visualization Environment (MVE) systems constitute a particular class of Visualization Packages. They are not visualization programs, but rather environments to build visualization applications, according to specific needs of data representation. It is now clear that MVEs belong to the class of general purpose Visualization Packages, because this is the main property that characterizes them. From the end-user point of view, MVEs are extremely versatile and flexible.
In many cases, setting up a personalized representation of data simply consists of interconnecting in a network a number of pre-existing modules with atomic functionality, creating the specific visualization pipeline that ends with the rendering of the data. The longer the list of available modules, the greater the number of different visualization applications the user can build in the MVE. The following MVEs are reviewed in this document: Application Visualization System (AVS), by Advanced Visual Systems Inc., Data Explorer, by IBM Inc., and IRIS Explorer, by NAG Ltd.}, url = {https://www.crs4.it/vic/data/papers/crs4-tr-97-65.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @Article{Pili:1997:IVC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:03", idxdatepub = "1997:09", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Piero Pili and Antonio Zorcolo and Enrico Gobbetti and Massimiliano Tuveri}, title = {Interactive {3D} Visualization of Carotid Arteries}, journal = {International Angiology}, year = {1997}, month = sep, volume = {16}, number = {3}, pages = {153}, publisher = {Minerva Medica}, abstract = {We have developed an experimental medical volume visualization system supporting head-tracked stereoscopic viewing registered with direct {3D} interaction. We aim to assess the suitability of these techniques for surgical planning tasks in real medical settings. We are interested in visualizing carotid arteries in depth by using interactive volume visualization, motion parallax and stereoscopic cues. Our display, when positioned as a surgical table, provides the impression of looking down at the patient in a naturalistic way. With simple head motion, good positions to observe the pathology are quickly established. A six degree-of-freedom head tracker measures head motion; appropriate stereoscopic images are then dynamically generated for shutter-glass {3D} viewing. Co-registering physical and virtual spaces beforehand means volumes appear at fixed physical positions and permits direct interaction via a {3D} pointing device. The system was tested on an SGI Infinite Reality, with CrystalEyes shutter glasses and Logitech {3D} trackers. This permits interactive work with an operative data set.}, keywords = {Medical Volume Visualization, Head-tracked Stereoscopic Viewing, {3D} Interaction, Interactive Visualization}, annote = {}, url = {} } @InProceedings{Zorcolo:1997:MVV, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:03", idxdatepub = "1997:09", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Antonio Zorcolo and Piero Pili and Enrico Gobbetti}, title = {A Medical Volume Visualization System supporting Head-tracked Stereoscopic Viewing and Direct {3D} Interaction}, booktitle = {Proceedings 15th International EuroPACS Meeting}, pages = {}, month = sep, year = {1997}, editor = {C. Bartolozzi and D. Caramella}, address = {Conference held in Pisa, Italy}, abstract = {We have developed an experimental medical volume visualization system supporting head-tracked stereoscopic viewing registered with direct {3D} interaction. Our aim is to assess the suitability of these techniques for surgical planning tasks in real medical settings. In particular, vascular surgeons examining the distal site of the aneurysmatic sac are assisted by visualizing the artery aneurysm in depth.
A better understanding of such complex spatial structures is achieved by incorporating motion parallax and stereoscopic cues to depth perception not available from static images. Our display, when positioned as a surgical table, provides the impression of looking down at the patient in a naturalistic way. With simple head motions, good positions for observing the pathology are quickly established.}, keywords = {Medical Volume Visualization, Head-tracked Stereoscopic Viewing, {3D} Interaction, Interactive Visualization}, annote = {}, url = {https://www.crs4.it/vic/data/papers/europacs97.pdf} } @Article{Zorcolo:1997:HTS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:03", idxdatepub = "1997:09", idxkey = {Volren, Surgical}, idxproject = {VIVA}, author = {Antonio Zorcolo and Piero Pili and Enrico Gobbetti}, title = {Head-Tracked Stereoscopic Viewing and {3D} Interaction for Medical Volume Visualization}, journal = {Computer Aided Surgery}, year = {1997}, month = sep, volume = {2}, number = {}, pages = {42--49}, publisher = pub-WILEY, address = pub-WILEY:adr, editor = {}, note = {Proceedings First International Congress on Computer-Integrated Surgery in the Area of the Head and Spine (CIS'97), held in Linz, Austria, September 25--27}, abstract = {}, keywords = {}, annote = {}, url = {} } @InProceedings{Turner:1997:MOO, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1997:03", idxdatepub = "1997:09", idxkey = {3DGUI,DESIGN}, idxproject = {METIS}, author = {Russell Turner and Li Song and Enrico Gobbetti}, title = {Metis: An Object-Oriented Toolkit for Constructing Virtual Reality Applications}, booktitle = {Proceedings Sixth Eurographics Workshop on Programming Paradigms in Graphics}, series = {Eurographics Workshop Proceedings Series}, publisher = pub-EUROGRAPHICS, pages = {79--90}, month = sep, year = {1997}, editor = {Farhad Arbab and Philipp Slusallek}, address = {Conference held in Budapest, Hungary}, abstract = {Virtual reality systems provide realistic look and feel by seamlessly integrating three-dimensional input and output devices. One software architecture approach to constructing such systems is to distribute the application between a computation-intensive simulator back-end and a graphics-intensive viewer front-end which implements user interaction. In this paper we discuss Metis, a toolkit we have been developing based on such a software architecture, which can be used for building interactive immersive virtual reality systems with computationally intensive components. The Metis toolkit defines an application programming interface on the simulator side, which communicates via a network with a standalone viewer program that handles all immersive display and interactivity.
Network bandwidth and interaction latency are minimized by the use of a constraint network on the viewer side that declaratively defines much of the dynamic and interactive behavior of the application.}, keywords = {Virtual Reality, Distribution, Object-Oriented Graphics, Constraint}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ewppg97.pdf} } @InProceedings{Benevento:1997:WSW, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1996:12", idxdatepub = "1997:04", idxkey = {MM}, idxproject = {}, author = {Francesco Benevento and Enrico Gobbetti}, title = {{WebVideo}: Simple Web Tools for Video Viewing and Browsing}, pages = {}, booktitle = {Proceedings Sixth International World Wide Web Conference, Poster Sessions}, year = {1997}, month = apr, editor = {}, address = {Conference held in Santa Clara, CA, USA}, abstract = {In this paper, we describe WebVideo, a library of simple tools for video browsing and viewing. The library offers a solid basis for developing web applications to view and browse video sequences and slide sets, as well as for distributing videos and images from real-time video sources. Since video transmission is based on a de-facto standard technology, the server-push method, remote users do not need any specific hardware or software in addition to a web browser. The tools offer adequate support for applications where image quality and simplicity of use are more important than high frame rates and perfect audio synchronization. WebVideo is available in the public domain and has been used for over one year in applications such as distance learning, slide presentations, and Internet TV broadcasting.}, keywords = {Image browsing, Web tools, Hypermedia, Server Push, Live video}, annote = {}, url = {https://www.crs4.it/vic/data/papers/www6.pdf} } @TechReport{Leone:1997:THD, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:04", idxdatepub = "1997:04", idxkey = {SCIVIZ}, idxproject = {}, author = {Andrea O.
Leone and Maria Grazia Setzu and Gianluigi Zanetti and Enrico Gobbetti}, title = {Time History: A data format for scientific data}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/11}, year = {1997}, url = {} } @TechReport{Bettio:1997:ATD, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:02", idxdatepub = "1997:02", idxkey = {RENDERING}, idxproject = {}, author = {Fabio Bettio and Riccardo Scateni}, title = {Acquisizione e Trattamento delle Immagini Digitali}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/30}, year = {1997}, } @TechReport{Gobbetti:1997:TVV, idxstatus = "Techreport", idxmedium = "Text", idxdatesub = "1997:02", idxdatepub = "1997:02", idxkey = {Volren, Sciviz}, idxproject = {}, author = {Enrico Gobbetti and Piero Pili and Riccardo Scateni}, title = {Tecniche di visualizzazione volumetrica di carotaggi}, institution = inst-CRS4, address = inst-CRS4:adr, number = {97/29}, year = {1997}, url = {https://www.crs4.it/vic/data/papers/crs4-tr-97-29.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } @InCollection{Gobbetti:1997:EAE, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:12", idxdatepub = "1997:01", idxkey = {Terrain, TCR, MM}, idxproject = {I3D}, author = {Enrico Gobbetti and Russell Turner}, title = {Exploring Annotated {3D} Environments on the World-Wide Web}, pages = {31--46}, booktitle = {Intelligent Hypertext: Advanced Techniques for the World-Wide Web}, series = ser-LNCS, volume = {1326}, publisher = pub-SV, address = pub-SV:adr, year = {1997}, editor = {Jim Mayfield and Charles Nicholas}, abstract = {The long-term goal of combining virtual reality and the Internet is to create networked multi-user simulations of virtual environments. The Virtual Reality Modeling Language (VRML) represents a limited but significant step towards this goal by creating a standard data file format for representing {3D} scene information, together with hyper-link information for associating it with other types of Web documents. Current proposals for extending VRML-1.0 to add behaviors will bring this goal closer, but much work remains to be done. This chapter gives a brief summary of VRML and then describes two significant projects currently under development based on {i3D}, a high-performance VRML browser developed by one of the authors. The first of these, currently being used at the European Laboratory for Particle Physics ({CERN}), uses an annotated virtual environment to visualize and walk through the physical design of the new Large Hadron Collider (LHC) before it is built.
The second project, Virtual Sardinia, allows the user to tour a {3D} terrain visualization of the island and access historic and tourist information through hyper-links.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ht-lncs96.pdf} } %################################ %### 1996 %################################ @Article{Balaguer:1996:UIG, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1994:03", idxdatepub = "1996:08", idxkey = {VB2, 3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {{3D} User Interfaces for General-Purpose {3D} Animation}, journal = j-COMPUTER, publisher = pub-IEEE, volume = {29}, number = {8}, pages = {71--78}, month = aug, year = {1996}, coden = {CPTRB4}, issn = {0018-9162}, affiliation = {CERN, Geneva, Switzerland}, classification= {722.2; 723.2; 723.5}, journalabr = {Computer}, abstract = {Modern {3D} animation systems let a growing number of people generate increasingly sophisticated animated movies, frequently for tutorials or multimedia documents. However, although these tasks are inherently three dimensional, these systems' user interfaces are still predominantly two dimensional. This makes it difficult to interactively input complex animated {3D} movements. We have developed Virtual Studio, an inexpensive and easy-to-use {3D} animation environment in which animators can perform all interaction directly in three dimensions. Animators can use {3D} devices to specify complex {3D} motions. Virtual tools are visible mediators that provide interaction metaphors to control application objects. An underlying constraint solver lets animators tightly couple application and interface objects. Users define animation by recording the effect of their manipulations on models. 
Virtual Studio applies data-reduction techniques to generate editable representations of each animated element that is manipulated.}, keywords = {Animation; Computer graphics equipment; Computer simulation; Computer workstations; Data reduction; Direct manipulation; General purpose animation systems; Graphical user interfaces; Graphics architecture; Human computer interaction; Interactive computer graphics; Interactive devices; Interprocess communication; Motion pictures; Special effects; Synthetic worlds; Three dimensional animation; Three dimensional computer graphics; Three dimensional interaction techniques; Two dimensional; User interfaces; Virtual reality; Virtual Studio; Virtual tools}, url = {https://www.crs4.it/vic/data/papers/ieee-computer96.pdf} } @Article{Turner:1996:HTS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1996:01", idxdatepub = "1996:09", idxkey = {LEMAN,3DGUI, MODELING, ANIMATION}, idxproject = {LEMAN}, author = {Russell Turner and Enrico Gobbetti and Ian Soboroff}, title = {Head-Tracked Stereo Viewing with Two-Handed {3D} Interaction for Animated Character Construction}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, volume = {15}, number = {3}, pages = {197--206, 470}, month = sep, year = {1996}, issn = {0167-7055}, classification= {722.2; 722.4; 723.1; 723.5; 741.2; C5260B (Computer vision and image processing techniques); C5540B (Interactive-input devices); C6130B (Graphics techniques)}, note = {Proceedings of the 1996 17th Annual Conference and Exhibition of the European Association for Computer Graphics, Eurographics'96, Held in Poitiers, France}, abstract = {In this paper, we demonstrate a new interactive {3D} desktop metaphor based on two-handed {3D} direct manipulation registered with head-tracked stereo viewing. In our configuration, a six-degree-of-freedom head-tracker and CrystalEyes shutter glasses are used to produce stereo images that dynamically follow the user's head motion. {3D} virtual objects can be made to appear at a fixed location in physical space which the user may view from different angles by moving his head. The user interacts with the simulated {3D} environment using both hands simultaneously. The left hand, controlling a Spaceball, is used for {3D} navigation and object movement, while the right hand, holding a {3D} mouse, is used to manipulate, through a virtual tool metaphor, the objects appearing in front of the screen because of negative parallax. In this way, both incremental and absolute interactive input techniques are provided by the system. Hand-eye coordination is made possible by registration between virtual and physical space, allowing a variety of complex {3D} tasks to be performed more easily and more rapidly than is possible using traditional interactive techniques.
The system has been tested using both Polhemus Fastrak and Logitech ultrasonic input devices for tracking the head and {3D} mouse.}, keywords = {{3D} animated characters; {3D} mouse; {3D} navigation; {3D} object movement; {3D} virtual objects; absolute interactive input techniques; animated character construction; animation; character modeling; complex {3D} tasks; computer animation; computer graphic equipment; computer simulation; computer software; CrystalEyes shutter glasses; hand-eye coordination; head tracking; head-tracked stereo viewing; incremental interactive input techniques; interactive {3D} desktop metaphor; interactive computer graphics; interactive computer systems; Logitech ultrasonic input devices; mice (computer peripherals); Polhemus Fastrak ultrasonic input devices; simulated environment; six-degree-of-freedom head-tracker; Spaceball; stereo image processing; stereo images; stereo vision; stereoscopic display; three dimensional computer graphics; two-handed {3D} direct manipulation; two-handed {3D} interaction; user interfaces; virtual reality; virtual space; virtual tools}, url = {https://www.crs4.it/vic/data/papers/eg96-htsv.pdf} } @Article{Gobbetti:1996:VSL, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1996:03", idxdatepub = "1996:09", idxkey = {Terrain, TCR, MM, VISIM}, idxproject = {I3D}, author = {Enrico Gobbetti and Andrea Leone}, title = {Virtual Sardinia: a Large-Scale Hypermedia Regional Information System}, journal = {Computer Networks and ISDN Systems}, year = {1996}, volume = {28}, number = {7--11}, pages = {1539--1546}, month = sep, abstract = {The Virtual Sardinia project aims at collecting a large amount of heterogeneous data concerning the island of Sardinia and representing them in such a way that a casual user can easily navigate through them in a virtual trip. All these data are interconnected in a hypermedia way, browsable on the World-wide Web, ranging from geographic to archaeological data, from historical to touristic information, both in {2D} and {3D}. One of the central components of Virtual Sardinia is {i3D}, a high-speed {3D} scene viewer for the World-wide Web. Using a Spaceball, the user can intuitively navigate with continuous viewpoint control inside three-dimensional data, while selecting {3D} objects with the mouse triggers requests for access to remote media documents that can be distributed over the Internet. This makes it possible to interactively explore a three-dimensional reconstruction of the island of Sardinia built from a digital terrain model texture-mapped with satellite images. Alternate interactive views of the model are provided in the form of movies, sequences, or clickable maps. The combination of these models becomes a natural front-end for querying all kinds of scientific, cultural, and touristic information about Sardinia.
We believe that this approach opens a new way to create regional information systems that can easily be made available to the general public.}, keywords = {Hypermedia, {3D} Visualization, VRML, WWW Browser, View-and-markup Tools.}, annote = {}, MASKEDurl = {https://www.crs4.it/PRJ/VIRTSARD/PAPERS/WWW96/} } @InCollection{Criscione:1996:DIS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:12", idxdatepub = "1996:01", idxkey = {SCIVIZ}, idxproject = {}, author = {Paola Criscione and Claudio Montani and Riccardo Scateni and Roberto Scopigno}, title = {{DiscMC}: an interactive system for fast fitting isosurfaces on volume data}, pages = {178--190}, booktitle = {Virtual Environments and Scientific Visualization}, publisher = pub-SV, address = pub-SV:adr, year = {1996}, editor = {Martin Goebel}, abstract = {}, keywords = {}, annote = {}, url = {}, } @InCollection{Gobbetti:1996:ITN, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:12", idxdatepub = "1996:01", idxkey = {Terrain, TCR, MM}, idxproject = {I3D}, author = {Enrico Gobbetti and Riccardo Scateni}, title = {Interactive Tools for Navigation in {3D} Spaces}, booktitle = {CRS4 Bulletin 1995}, publisher = inst-CRS4, address = inst-CRS4:adr, year = 1996, editor = {}, abstract = {{i3D} is a VRML browser, a system that combines the {3D} input and high-performance rendering capabilities of high-end virtual reality systems with the data fetching abilities of network browsers. Using a Spaceball, the user can intuitively navigate inside the three-dimensional data, while selecting {3D} objects with the mouse triggers requests for access to remote media documents that can be text, still images, animations or even other {3D} models. Time-critical rendering techniques allow the system to display complex {3D} scenes at high and constant frame rates, making it possible to use it in the context of large scale projects. The system is currently being used at {CERN} as a visualization and data management tool for the design of the new Large Hadron Collider, and is used at {CRS4} as a basis for the Virtual Sardinia project.}, pages = {80--84}, url = {}, } @InCollection{Balaguer:1996:IHS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:12", idxdatepub = "1996:05", idxkey = {TCR, MM}, idxproject = {I3D}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {{i3D}: A High Speed {3D} Web Browser}, pages = {}, booktitle = {VRML: Bringing Virtual Reality to the Internet}, month = may, year = {1996}, editor = {John R. Vacca}, publisher = pub-AP-PROFESSIONAL, address = pub-AP-PROFESSIONAL:adr, isbn = {0127099115}, abstract = {}, keywords = {}, note = {Revised version of VRML'95 conference paper}, annote = {}, url = {} } @Booklet{Gobbetti:1996:TOO, idxstatus = "Lecture", idxmedium = "Text", idxdatesub = "1996:06", idxdatepub = "1996:06", idxkey = {}, idxproject = {}, title = {Topics in Object-Oriented Technology}, author = {Enrico Gobbetti and Charles Nicholas}, howpublished = {Graduate Course 691J Tutorial Notes}, address = {Computer Science Department, University of Maryland Baltimore County}, abstract = {The course will provide an in-depth treatment of object-oriented software development, including analysis, design, and programming. The focus will be on the construction of correct, reusable, and efficient software through a systematic application of design by contract. One important design method will be presented in detail; different programming paradigms will be contrasted; and several object-oriented languages (including C++, Eiffel, and Java) will be analyzed. Individual programming projects and a group design project are planned. Students will also be expected to make in-class presentations on selected topics.}, year = 1996, MASKEDthumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, MASKEDurl = {https://www.csee.umbc.edu/courses/graduate/691J/} } %################################ %### 1995 %################################ @InCollection{Leone:1995:VIC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:05", idxdatepub = "1995:05", idxkey = {SCIVIZ}, idxproject = {}, author = "Andrea O. Leone and Riccardo Scateni", title = "Visualization of Internal Combustion Simulations in a Modular Environment", booktitle = "Visualization in Scientific Computing '95", pages = "126--134", publisher = "Springer-Verlag Wien", year = "1995", ISBN = "3-211-82729-3", issn = "0946-2767", editor = "R. Scateni and J. van Wijk and P. Zanarini", month = may, abstract = "We describe a solution to the problem of visualizing the results of simulations of a combustion chamber in a power plant, using a modular visualization environment: Iris Explorer. We first sketch the fluid-dynamics problem to be solved, and then focus our attention on the visualization problems, especially how to visualize several different scalar fields at the same time. We then describe our proposed environment for the solution, including several new modules we implemented for Iris Explorer, together with their advantages and disadvantages. Finally, we discuss the possible future evolution of the project.", } @InProceedings{Balaguer:1995:IHS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:06", idxdatepub = "1995:12", idxkey = {TCR, MM}, idxproject = {I3D}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {{i3D}: a high-speed {3D Web} browser}, editor = {}, booktitle = {1995 Symposium on the Virtual Reality Modeling Language ({VRML} '95)}, publisher = pub-ACM, address = {Conference held in San Diego, CA, USA}, isbn = {0-89791-818-5}, pages = {69--76}, year = {1995}, month = dec, affiliation = {Center for Adv. Studies, Res. and Dev., Sardinia, Italy}, abstract = {In this paper, we present {i3D}, a system that combines the {3D} input and high-performance rendering capabilities of high-end virtual reality systems with the data fetching abilities of network browsers. Using a Spaceball, the user can intuitively navigate inside the three-dimensional data, while selecting {3D} objects with the mouse triggers requests for access to remote media documents that can be text, still images, animations or even other {3D} models. Time-critical rendering techniques allow the system to display complex {3D} scenes at high and constant frame rates, making it possible to use it in the context of large scale projects. The system is currently being used at {CERN} as a visualization and data management tool for the design of the new Large Hadron Collider, and at {CRS4} for the Virtual Sardinia project and in the networked educational system {IPERLER}.
{i3D} is available through anonymous ftp from various sites on the Internet.}, keywords = {Hypermedia, {3D} Visualization, VRML, WWW Browser, View-and-markup Tools, SGML}, url = {https://www.crs4.it/vic/data/papers/vrml95.pdf} } @InProceedings{Gobbetti:1995:VSH, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:06", idxdatepub = "1995:12", idxkey = {Terrain, TCR, MM}, idxproject = {I3D}, author = {Enrico Gobbetti and Andrea Leone and Alberto Marini}, title = {Virtual Sardinia: a Hypermedia Fly-through with Real Data}, pages = {253--260}, booktitle = {Proceedings of the International Workshop on Soft Computing in Remote Sensing Data Analysis}, year = {1995}, editor = {E. Binaghi and P. A. Brivio and A. Rampini}, address = {Conference held in Milan, Italy}, abstract = {The Virtual Sardinia project aims at collecting a large amount of heterogeneous data concerning the island of Sardinia and representing them in such a way that a casual user can easily navigate through them in a virtual trip. All these data are interconnected in a hypermedia way, browsable on the World-wide Web, ranging from geographic to archaeological data, from historical to touristic information, both in {2D} and {3D}. The central component of Virtual Sardinia is {i3D}, a high-speed {3D} scene viewer for the World-wide Web. Using a Spaceball, the user can intuitively navigate with continuous viewpoint control inside three-dimensional data, while selecting {3D} objects with the mouse triggers requests for access to remote media documents that can be distributed over the Internet. For the Virtual Sardinia project, the main {3D} model that is explored by the users is a three-dimensional reconstruction of the island of Sardinia built from a digital terrain model texture-mapped with satellite images.}, keywords = {}, annote = {}, url = {https://www.crs4.it/vic/data/papers/wscrsda95.pdf} } @InProceedings{Gobbetti:1995:IIS, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:02", idxdatepub = "1995:08", idxkey = {TCR, MM}, idxproject = {I3D}, author = {Enrico Gobbetti and Jean-Francis Balaguer}, title = {{i3D}: An Interactive System for Exploring Annotated {3D} Environments}, pages = {16--30}, booktitle = {Scientific Visualization '95 (AICA '95 International Symposium on Scientific Visualization Proceedings)}, year = {1995}, month = aug, editor = {Riccardo Scateni}, address = {Conference held in Chia, Italy}, publisher = pub-WORLD-SCI, abstract = {In this paper, we present {i3D}, a system that combines the {3D} input and high-performance rendering capabilities of high-end virtual reality systems with the data fetching abilities of network browsers. Using a Spaceball, the user can intuitively navigate inside the three-dimensional data, while selecting {3D} objects with the mouse triggers requests for access to remote media documents that can be text, still images, animations or even other {3D} models. Time-critical rendering techniques allow the system to display complex {3D} scenes at high and constant frame rates, making it possible to use it in the context of large scale projects.
The system is currently being used at {CERN} as a visualization and data management tool for the design of the new Large Hadron Collider, and will be used at {CRS4} in the networked educational system IPERLER.}, keywords = {Hypermedia, {3D} Visualization, {3D} Walkthrough, View-and-markup Tools.}, annote = {}, url = {https://www.crs4.it/vic/data/papers/aica95.pdf} } @InProceedings{Balaguer:1995:SIAa, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:03", idxdatepub = "1995:09", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {Supporting Interactive Animation Using Multi-way Constraints}, pages = {17--28}, booktitle = {Proceedings of the Fifth Eurographics Workshop on Programming Paradigms in Graphics}, publisher = pub-EUROGRAPHICS, address = {Conference held in Maastricht, The Netherlands}, month = sep, year = {1995}, editor = {Remco Veltkamp and Edwin Blake}, abstract = {This paper presents how the animation subsystem of an interactive environment for the visual construction of {3D} animations has been modeled on top of an object-oriented constraint imperative architecture. The system demonstrates that, although they are limited to expressing acyclic conflict-free graphs, multi-way dataflow constraints are general enough to model a large variety of behaviors while remaining efficient enough to ensure the responsiveness of large interactive {3D} graphics applications.}, keywords = {Data Reduction, {3D} Animation, {3D} Interaction, Performance-Driven Animation}, annote = {}, url = {} } @InCollection{Balaguer:1995:SIAc, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:09", idxdatepub = "1995:09", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {Supporting Interactive Animation Using Multi-way Constraints}, pages = {17--28}, booktitle = {Programming Paradigms in Graphics'95}, publisher = pub-SV, address = pub-SV:adr, year = {1995}, editor = {Remco Veltkamp and Edwin Blake}, abstract = {This paper presents how the animation subsystem of an interactive environment for the visual construction of {3D} animations has been modeled on top of an object-oriented constraint imperative architecture. In our architecture, there is no intrinsic difference between user-interface and application objects. Multi-way dataflow constraints provide the necessary tight coupling among components that makes it possible to seamlessly compose animated and interactive behaviors. Indirect paths allow an effective use of the constraint model in the context of dynamic applications. The ability of the underlying constraint solver to deal with hierarchies of multi-way, multi-output dataflow constraints, together with the ability of the central state manager to handle indirect constraints, are exploited to define most of the behaviors of the modeling and animation components in a declarative way. The ease of integration between all the system's components opens the door to novel interactive solutions to modeling and animation problems. By recording the effects of the user's manipulations on the models, all the expressive power of the {3D} user interface is exploited when defining animations. This performance-based approach complements standard key-framing systems by providing the ability to create animations with straight-ahead actions. At the end of the recording session, animation tracks are automatically updated to integrate the new piece of animation.
Animation components can be easily synchronized using constrained manipulation during playback. The system demonstrates that, although they are limited to expressing acyclic conflict-free graphs, multi-way dataflow constraints are general enough to model a large variety of behaviors while remaining efficient enough to ensure the responsiveness of large interactive {3D} graphics applications.}, note = {Revised version of Eurographics Workshop on Programming Paradigms in Graphics'95 paper}, keywords = {Data Reduction, {3D} Animation, {3D} Interaction, Performance-Driven Animation}, annote = {}, url = {https://www.crs4.it/vic/data/papers/ewppcg95.pdf} } @InProceedings{Gobbetti:1995:IEV, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:01", idxdatepub = "1995:08", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Enrico Gobbetti and {Jean-Francis} Balaguer}, title = {An Integrated Environment to Visually Construct {3D} Animations}, editor = {Robert Cook}, series = {Annual Conference Series}, pages = {395--398}, booktitle = {SIGGRAPH 95 Conference Proceedings}, year = {1995}, organization = {ACM SIGGRAPH}, publisher = pub-AW, month = aug, address = {Conference held in Los Angeles, CA, USA}, abstract = {In this paper, we present an expressive {3D} animation environment that enables users to rapidly and visually prototype animated worlds with a fully {3D} user-interface. A {3D} device allows the specification of complex {3D} motion, while virtual tools are visible mediators that live in the same {3D} space as application objects and supply the interaction metaphors to control them. In our environment, there is no intrinsic difference between user-interface and application objects. Multi-way constraints provide the necessary tight-coupling among components that makes it possible to seamlessly compose interactive and animated behaviors. By recording the effects of manipulations, all the expressive power of the {3D} user-interface is exploited to define animations. Effective editing of recorded manipulations is made possible by compacting all continuous parameter evolutions with an incremental data-reduction algorithm, designed to preserve both geometry and timing. The automatic generation of editable representations of interactive performances overcomes one of the major limitations of current performance animation systems. Novel interactive solutions to animation problems are made possible by the tight integration of all system components. In particular, animations can be synchronized by using constrained manipulation during playback. The accompanying video tape illustrates our approach with interactive sequences showing the visual construction of {3D} animated worlds.
All the demonstrations were recorded live and were not edited.}, url = {https://www.crs4.it/vic/data/papers/sig95.pdf} } @Article{Balaguer:1995:SA, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1995:01", idxdatepub = "1995:09", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, title = {Sketching {3D} Animations}, journal = j-CG-FORUM, publisher = pub-BLACKWELL, address = pub-BLACKWELL:adr, volume = {14}, number = {3}, pages = {C241--C258}, month = sep, year = {1995}, coden = {CGFODY}, issn = {0167-7055}, classification= {C4130 (Interpolation and function approximation); C6130B (Graphics techniques); C6180 (User interfaces)}, note = {Proceedings of the 1995 16th Annual Conference and Exhibition of the European Association for Computer Graphics, Eurographics'95, Held in Maastricht, the Netherlands}, corpsource = {Center for Res. Studies in Sardinia, Cagliari, Italy}, abstract = {We are interested in providing animators with a general-purpose tool allowing them to create animations using straight-ahead actions as well as pose-to-pose techniques. Our approach seeks to bring the expressiveness of real-time motion capture systems into a general-purpose multi-track system running on a graphics workstation. We emphasize the use of high-bandwidth interaction with {3D} objects together with specific data reduction techniques for the automatic construction of editable representations of interactively sketched continuous parameter evolution. In this paper, we concentrate on providing a solution to the problem of applying data reduction techniques in an animation context. The requirements that must be fulfilled by the data reduction algorithm are analyzed. From the Lyche and M{\o}rken knot removal strategy, we derive an incremental algorithm that computes a B-spline approximation to the original curve by considering only a small piece of the total curve at any time. This algorithm allows the processing of the user's captured motion in parallel with its specification, and guarantees constant latency time and memory needs for input motions composed of any number of samples. After showing the results obtained by applying our incremental algorithm to {3D} animation paths, we describe an integrated environment to visually construct {3D} animations, where all interaction is done directly in three dimensions. By recording the effects of the user's manipulations and taking into account the temporal aspect of the interaction, straight-ahead animations can be defined. Our algorithm is automatically applied to continuous parameter evolution in order to obtain editable representations.
The paper concludes with a presentation of future work.}, keywords = {{3D} animation sketching; {3D} objects; B-spline approximation; computer animation; constant latency time; continuous parameter evolution; data reduction; graphics workstation; high-bandwidth interaction; incremental algorithm; interactive systems; knot removal strategy; multitrack system; pose-to-pose techniques; real-time motion capture; real-time systems; specification; splines (mathematics); straight-ahead actions; three dimensional animation; user interfaces}, thesaurus = {Computer animation; Data reduction; Interactive systems; Real-time systems; Splines [mathematics]; User interfaces}, treatment = {P Practical; T Theoretical or Mathematical}, url = {https://www.crs4.it/vic/data/papers/eg95.pdf} } @Booklet{Balaguer:1995:LFD, idxstatus = "Lecture", idxmedium = "Text", idxdatesub = "1995:01", idxdatepub = "1995:09", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, title = {Leaving Flatland: from the Desktop Metaphor to Virtual Reality}, author = {Jean-Francis Balaguer and Enrico Gobbetti}, howpublished = {Eurographics -- 16th Annual Conference and Exhibition of the European Association for Computer Graphics}, address = {Conference held in Maastricht, The Netherlands}, month = aug, year = 1995, } %################################ %### 1994 %################################ @InProceedings{Montani:1994:DMC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1994:10", idxdatepub = "1994:03", idxkey = {SCIVIZ}, idxproject = {}, author = "Claudio Montani and Riccardo Scateni and Roberto Scopigno", title = "Discretized Marching Cubes", pages = "281--287", ISBN = "0-8186-6626-9", editor = "R. Daniel Bergeron and Arie E.
Kaufman", booktitle = "Proceedings of the Conference on Visualization", month = oct, publisher = "IEEE Computer Society Press", address = "Los Alamitos, CA, USA", year = "1994", } @Article{Montani:1994:MLT, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1998:11", idxdatepub = "2000:02", idxkey = {SCIVIZ}, idxproject = {}, author = {Claudio Montani and Riccardo Scateni and Roberto Scopigno}, title = {A modified look-up table for implicit disambiguation of Marching Cubes}, journal = {The Visual Computer}, publisher = pub-SV, address = pub-SV:adr, volume = 10, number = 6, pages = {353--355}, month = dec, year = 1994, abstract = {}, keywords = {}, url = {}, } %################################ %### 1993 %################################ @InProceedings{Gobbetti:1993:VAI, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:03", idxdatepub = "1993:10", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Enrico Gobbetti and Jean-Francis Balaguer}, title = {{VB2}: An Architecture for Interaction in Synthetic Worlds}, booktitle = {Proceedings of the ACM SIGGRAPH Symposium on User Interface Software and Technology}, series = {Virtual Reality}, publisher = pub-ACM, address = {Conference held in Atlanta, GA, USA}, pages = {167--178}, year = {1993}, copyright = {(c) Copyright 1993 Association for Computing Machinery}, keywords = {User interface design, {3D} interaction, {3D} virtual tools, Gestural input, Virtual reality, Object-oriented graphics, Hierarchical constraints}, abstract = {This paper describes the {VB2} architecture for the construction of three-dimensional interactive applications. The system's state and behavior are uniformly represented as a network of interrelated objects. Dynamic components are modeled by active variables, while multi-way relations are modeled by hierarchical constraints. Daemons are used to sequence between system states in reaction to changes in variable values. The constraint network is efficiently maintained by an incremental constraint solver based on an enhancement of SkyBlue. Multiple devices are used to interact with the synthetic world through the use of various interaction paradigms, including immersive environments with visual and audio feedback. Interaction techniques range from direct manipulation, to gestural input and three-dimensional virtual tools. Adaptive pattern recognition is used to increase input device expressiveness by enhancing sensor data with classification information. Virtual tools, which are encapsulations of visual appearance and behavior, present a selective view of manipulated models' information and offer an interaction metaphor to control it. Since virtual tools are first class objects, they can be assembled into more complex tools, much in the same way that simple tools are built on top of a modeling hierarchy. 
The architecture is currently being used to build a virtual reality animation system.}, url = {https://www.crs4.it/vic/data/papers/uist93.pdf} } @InCollection{Gobbetti:1993:VBT, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:08", idxdatepub = "1993:10", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Enrico Gobbetti and Jean-Francis Balaguer}, title = {{Virtuality Builder} {II}: On The Topic of {3D} Interaction}, booktitle = {Virtual Reality and Multimedia}, editor = {Daniel Thalmann and Nadia Magnenat-Thalmann}, year = 1993, publisher = pub-WILEY, address = pub-WILEY:adr, abstract = {Most of today's user interfaces for {3D} graphics systems still predominantly use {2D} widgets, even though current graphical hardware should make it possible to create applications in which the user directly manipulates aspects of three-dimensional synthetic worlds. The difficulties associated with achieving the key goal of immersion have led research in virtual environments to concentrate far more on the development of new input and display devices than on higher-level techniques for {3D} interaction. It is only recently that interaction with synthetic worlds has tried to go beyond straightforward interpretation of physical device data. The design space for {3D} interaction tools and techniques remains mostly unexplored, while being far larger than in standard {2D} applications. Moreover, as stated by Myers, ``the only reliable way to generate quality interfaces is to test prototypes with users and modify the design based on their comments''. The creation of complex interactive applications is an inherently iterative process that requires user interface tools, such as toolkits or frameworks. The lack of experience in {3D} interfaces makes it extremely difficult to design {3D} interface toolkits or frameworks. We believe that offering the possibility to rapidly prototype and test novel interaction techniques should be the primary goal of such tools. It is therefore more important for these tools to provide a wide range of interaction components than to enforce a particular interface style. In this paper we present the {Virtuality Builder II} ({VB2}) framework developed at the Swiss Federal Institute of Technology for the construction of {3D} interactive applications. First, we'll give an overview of the design concepts of {VB2}.
Next, we will concentrate on how users interact with dynamic models through direct manipulation, gestures, and virtual tools.}, url = {https://www.crs4.it/vic/data/papers/vrmm93.pdf} } @InCollection{Gobbetti:1993:BIA, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:01", idxdatepub = "1993:05", idxkey = {VB2,3DGUI, DESIGN, ANIMATION}, idxproject = {VB2}, author = {Enrico Gobbetti and Jean-Francis Balaguer and Angelo Mangili and Russell Turner}, title = {Building an Interactive {3D} Animation System}, booktitle = {Object-Oriented Applications}, publisher = pub-PH, address = pub-PH:adr, year = 1993, editor = {Bertrand Meyer and Jean-Marc Nerson}, pages = {211--242}, url = {https://www.crs4.it/vic/data/papers/ooapp.pdf} } @InProceedings{Kalra:1993:MTF, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:04", idxdatepub = "1993:11", idxkey = {ANIMATION}, idxproject = {VB2}, author = {Prem Kalra and Enrico Gobbetti and Daniel Thalmann and Nadia Magnenat-Thalmann}, title = {A Multimedia Testbed for Facial Animation Control}, booktitle = {Proceedings International Conference on Multi-Media Modeling (MMM'93)}, address = {Conference held in Singapore}, abstract = {This paper presents an open testbed for controlling facial animation. The adopted controlling means can act at different levels of abstraction (specification). These means of control can be associated with different interactive devices and media, thereby allowing the animator greater flexibility and freedom. The possibility of integrating and mixing control means provides a general platform where a user can experiment with his choice of control method. Experiments with input accessories such as the keyboard of a music synthesizer and gestures from the DataGlove are illustrated.}, keywords = {Facial Expression, Animation Control, Gestures, MIDI Keyboard, DataGlove, Performance Animation}, month = nov, year = {1993}, pages = {59--72}, url = {https://www.crs4.it/vic/data/papers/mmm93.pdf} } @InProceedings{Turner:1993:IGCb, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:01", idxdatepub = "1993:05", idxkey = {DESIGN, ANIMATION}, idxproject = {VB2}, author = {Russell Turner and Enrico Gobbetti and Jean-Francis Balaguer and Angelo Mangili}, title = {An Interactive {3D} Graphics Class Library in {Eiffel}}, booktitle = {Proceedings Eurographics Workshop on Object-Oriented Graphics}, series = {Eurographics Workshop Proceedings Series}, publisher = pub-EUROGRAPHICS, address = {Conference held in Champ\'ery, Switzerland}, year = {1993}, pages = {}, abstract = {An object-oriented design is presented for building interactive {3D} graphics applications. The design takes the form of a library of classes written in {Eiffel}, an object-oriented language with multiple inheritance, static typing, dynamic binding, garbage collection, and assertion checking. The classes form a set of reusable components from which a variety of other interactive {3D} graphics applications could easily be constructed. A discussion of the overall design goals and philosophy is given. This is followed by a summary description of the purpose and implementation of each of the component class clusters.
Finally, the issues of applying object-oriented techniques to interactive {3D} graphics are discussed, including the encapsulation of existing software and the implementation on a Silicon Graphics Iris workstation.}, url = {} } @InCollection{Turner:1993:IGC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1993:05", idxdatepub = "1993:09", idxkey = {DESIGN, ANIMATION}, idxproject = {VB2}, author = {Russell Turner and Enrico Gobbetti and Jean-Francis Balaguer and Angelo Mangili}, title = {An Interactive {3D} Graphics Class Library in {Eiffel}}, booktitle = {Advances in Object Oriented Graphics {II}}, editor = {Edwin Blake and Chris Laffra and Vicky {de Mey} and Xavier Pintado}, publisher = pub-SV, address = pub-SV:adr, year = {1993}, pages = {271--289}, isbn = {3-540-58314-9}, note = {Revised version of Eurographics Workshop on Object-Oriented Graphics'93 paper}, url = {https://www.crs4.it/vic/data/papers/ewoog.pdf} } @PhdThesis{Gobbetti:1993:VBV, idxstatus = "Thesis", idxmedium = "Text", idxdatesub = "1993:11", idxdatepub = "1993:12", idxkey = {VB2,3DGUI, DESIGN, MODELING, ANIMATION}, idxproject = {VB2}, author = {Enrico Gobbetti}, title = {Virtuality Builder II: Vers une architecture pour l'interaction avec des mondes synth{\'e}tiques}, school = inst-EPFL, address = inst-EPFL:adr, year = {1993}, abstract = {Virtuality Builder II is an object-oriented architecture based on constraint imperative programming to be used for the creation of interactive {3D} applications. Declarative and imperative techniques are jointly used to define a system's behavior: active variables store the state of the system; hierarchical multi-way constraints declaratively represent long-lived relations between objects; daemons are used for imperatively sequencing between system states; and higher-order control techniques make it possible to integrate the constraints in a dynamic environment. A central state manager is responsible for incrementally satisfying the constraints, maintaining the history of values, handling higher-order control, and activating the daemons. This model frees the programmer from the tedious task of maintaining relationships by hand, while offering the means to uniformly represent all the aspects of an interactive application and to obtain a strong coupling between application and interaction objects, which is an essential feature of {3D} interfaces. Interaction objects are created by assembling active components that analyze their environment and react to its changes.
This allows the integration of various interaction techniques, such as gestural input and constrained manipulation, and the creation of virtual tools, which are first-class mediators able to display a selective view of application objects and to offer the interaction metaphor to manipulate them.}, MASKEDurl = {https://www.crs4.it/{\~{}}gobbetti/publications/VB2-PhD.html}, } @Booklet{Gobbetti:1993:VRC, idxstatus = "Lecture", idxmedium = "Text", idxdatesub = "1993:06", idxdatepub = "1993:06", idxkey = {3DGUI}, idxproject = {VB2}, title = {A Virtual Reality Cookbook}, author = {Enrico Gobbetti and Jean-Francis Balaguer}, howpublished = {Tutorial notes, Computer Graphics International}, address = {Conference held in Lausanne, Switzerland}, month = jun, year = 1993, url = {https://www.crs4.it/vic/data/papers/cgi93-tutorial.pdf}, thumbnail = {https://www.crs4.it/vic/img/thumb-none.jpg}, } %################################ %### 1992 %################################ @InCollection{Gobbetti:1992:OOD, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1992:02", idxdatepub = "1992:08", idxkey = {5DTOOLKIT,DESIGN}, idxproject = {VB2}, author = {Enrico Gobbetti and Russell Turner}, title = {Object-Oriented Design of Dynamic Graphics Applications}, booktitle = {New Trends in Animation and Visualization}, editor = {Daniel Thalmann and Nadia Magnenat-Thalmann}, publisher = pub-WILEY, address = pub-WILEY:adr, year = {1992}, pages = {43--58}, url = {https://www.crs4.it/vic/data/papers/oog92.pdf} } %################################ %### 1991 %################################ @InProceedings{Turner:1991:PIC, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1991:02", idxdatepub = "1991:08", idxkey = {5DTOOLKIT,3DGUI}, idxproject = {VB2}, author = {Russell Turner and Jean-Francis Balaguer and Enrico Gobbetti and Daniel Thalmann}, editor = {N. M. Patrikalakis}, title = {Physically-based Interactive Camera Motion Control using {3D} Input Devices}, booktitle = {Scientific Visualization of Physical Phenomena: Proceedings of CG International Tokyo}, pages = {135--145}, publisher = pub-SV, address = {Conference held in Tokyo, Japan}, year = {1991}, abstract = {The newest three-dimensional input devices, together with high speed graphics workstations, make it possible to interactively specify virtual camera motions for animation in real time. In this paper, we describe how naturalistic interaction and realistic-looking motion can be achieved by using a physically-based model of the camera's behavior. Our approach is to create an abstract physical model of the camera, using the laws of classical mechanics, which is used to simulate the virtual camera motion in real time in response to force data from the various {3D} input devices (e.g. the Spaceball, Polhemus and DataGlove). The behavior of the model is determined by several physical parameters such as mass, moment of inertia, and various friction coefficients which can all be varied interactively, and by constraints on the camera's degrees of freedom which can be simulated by setting certain friction parameters to very high values. This allows us to explore a continuous range of physically-based metaphors for controlling the camera motion. 
We present the results of experiments with several of these metaphors and contrast them with existing ones.}, keywords = {3D Interaction, Motion Control, Dynamics, Virtual Cameras}, url = {https://www.crs4.it/vic/data/papers/cgi91.pdf} } @InCollection{Turner:1991:ISW, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1991:02", idxdatepub = "1991:08", idxkey = {5DTOOLKIT,3DGUI}, idxproject = {5DTOOLKIT}, author = {Russell Turner and Jean-Francis Balaguer and Enrico Gobbetti and Daniel Thalmann}, editor = {G. N. Schmitt}, title = {Interactive Scene Walkthrough Using a Physically-Based Virtual Camera}, booktitle = {CAAD Futures '91, Conference Proceedings}, publisher = pub-VIEWEG, address = pub-VIEWEG:adr, pages = {511--520}, year = {1991}, abstract = {One of the most powerful results of recent advances in graphics hardware is the ability of a computer user to interactively explore a virtual building or landscape. The newest three-dimensional input devices, together with high speed {3D} graphics workstations, make it possible to view and move through a {3D} scene by interactively controlling the motion of a virtual camera. In this paper, we describe how natural and intuitive control of building walkthroughs can be achieved by using a physically-based model of the virtual camera's behavior. Using the laws of classical mechanics to create an abstract physical model of the camera, we then simulate the virtual camera motion in real time in response to force data from the various {3D} input devices (e.g. the Spaceball and Polhemus 3Space Digitizer). The resulting interactive behavior of the model is determined by several physical parameters such as mass, moment of inertia, and various friction coefficients which can all be varied interactively, and by constraints on the camera's degrees of freedom. This allows us to explore a continuous range of physically-based metaphors for controlling the camera motion. We present the results of experiments using several of these metaphors for virtual camera motion and describe the effects of the various physical parameters.}, keywords = {3D Interaction, Motion Control, Dynamics, Virtual Cameras}, url = {https://www.crs4.it/vic/data/papers/caad91.pdf} } %################################ %### 1990 %################################ @InProceedings{Turner:1990:OOM, idxstatus = "Published", idxmedium = "Text", idxdatesub = "1990:02", idxdatepub = "1990:08", idxkey = {5DTOOLKIT,3DGUI,DESIGN}, idxproject = {5DTOOLKIT}, author = {Russell Turner and Enrico Gobbetti and Jean-Francis Balaguer and Angelo Mangili and Daniel Thalmann}, title = {An Object-oriented Methodology with Dynamic Variables for Animation and Scientific Visualization}, booktitle = {Proceedings Computer Graphics International}, publisher = pub-SV, address = {Conference held in Singapore}, pages = {317--328}, year = {1990}, abstract = {An object-oriented design is presented for building dynamic three-dimensional applications. This design takes the form of the Fifth Dimension Toolkit, consisting of a set of interrelated classes whose instances may be connected together in a variety of ways to form different applications. Animation is obtained by connecting graphical objects to dynamic variables, which are able to change their values over time by responding to events. The Fifth Dimension Toolkit is the core of the Fifth Dimension Project, a research project for animating synthetic actors in their environment.
The design philosophy and methodology of the toolkit are also described, as well as some of the implementation issues for the Silicon Graphics Iris 4D workstation.}, keywords = {Object-Oriented, Animation, Scientific Visualization, Dynamic Variables.}, url = {https://www.crs4.it/vic/data/papers/cgi90.pdf} }
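%################################
%### Editor's note: illustrative sketch (not from the cited papers)
%################################
% The physically-based virtual camera of Turner:1991:PIC and Turner:1991:ISW
% maps input-device forces onto a damped rigid body whose mass and friction
% coefficients are tuned interactively; setting a friction coefficient to a
% very high value effectively locks that degree of freedom. The Python sketch
% below is a minimal, hypothetical reading of that idea: class and parameter
% names are ours, not the papers', and rotation (moment of inertia, torques)
% is omitted for brevity. The viscous term is integrated implicitly, so the
% update stays stable even for the huge friction values used to pin an axis.

from dataclasses import dataclass, field
from typing import List, Sequence

@dataclass
class PhysicalCamera:
    """Damped point-mass camera driven by 3D input-device forces."""
    mass: float = 1.0
    friction: Sequence[float] = (0.5, 0.5, 0.5)  # per-axis viscous coefficients
    position: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0])
    velocity: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0])

    def step(self, force: Sequence[float], dt: float) -> None:
        """Advance one frame: m*dv/dt = F - k*v, with implicit damping."""
        for i in range(3):
            v = self.velocity[i] + (force[i] / self.mass) * dt
            # Implicit viscous damping is unconditionally stable, so a huge
            # friction coefficient simply freezes the axis (a soft constraint).
            self.velocity[i] = v / (1.0 + (self.friction[i] / self.mass) * dt)
            self.position[i] += self.velocity[i] * dt

# Example: lock vertical motion by making its friction effectively infinite.
cam = PhysicalCamera(friction=(0.5, 1e9, 0.5))
for _ in range(60):                       # one second of simulation at 60 Hz
    cam.step(force=(1.0, 1.0, 0.0), dt=1.0 / 60.0)

% Varying mass and friction continuously is what yields the "range of
% physically-based metaphors" the papers describe, from floating to viscous.
%################################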
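%################################
%### Editor's note: illustrative sketch (not from the cited papers)
%################################
% Balaguer:1995:SA derives an incremental B-spline fitter from the
% Lyche-M{\o}rken knot-removal strategy, processing captured motion in
% parallel with its specification at constant latency and memory. That exact
% algorithm is beyond the scope of a note; as a deliberately simpler stand-in,
% the Python sketch below shows the same streaming structure using
% error-bounded polyline decimation: samples arrive one at a time, only a
% bounded window is buffered, and a sample is kept only when dropping it
% would move the reconstruction by more than a tolerance. The function and
% parameter names are ours.

def decimate_stream(samples, tol=0.01, window=64):
    """Incrementally thin an iterable of (t, value) samples.

    Keeps memory and latency bounded by `window`; assumes strictly
    increasing timestamps. Yields a reduced subsequence of the input.
    """
    pending = []   # samples seen since the last emitted one
    last = None    # last emitted (t, value) pair
    for sample in samples:
        if last is None:
            last = sample
            yield sample
            continue
        pending.append(sample)
        t0, v0 = last
        t1, v1 = sample
        # Are all buffered samples within `tol` of the chord last -> sample?
        within = all(
            abs(v - (v0 + (v1 - v0) * (t - t0) / (t1 - t0))) <= tol
            for t, v in pending[:-1]
        )
        if not within or len(pending) >= window:
            # Commit the most recent sample for which the chord still fit.
            keep = pending[-2] if len(pending) >= 2 else pending[-1]
            yield keep
            last = keep
            pending = [p for p in pending if p[0] > keep[0]]
    if pending:
        yield pending[-1]   # always keep the final sample

# Example: reduce a densely sampled curve while bounding the error.
import math
curve = [(i / 100.0, math.sin(i / 10.0)) for i in range(500)]
reduced = list(decimate_stream(curve, tol=0.005))

% The paper's B-spline variant additionally preserves smoothness and timing
% of the reconstructed parameter curves, which this chord test does not.
%################################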