data
dict |
---|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DvgCK2YAyQ",
"doi": "10.1109/TVCG.2022.3175532",
"abstract": "A fundamental problem in visual data exploration concerns whether observed patterns are true or merely random noise. This problem is especially pertinent in visual analytics, where the user is presented with a barrage of patterns, without any guarantees of their statistical validity. Recently this problem has been formulated in terms of statistical testing and the multiple comparisons problem. In this paper, we identify two levels of multiple comparisons problems in visualization: the within-view and the between-view problem. We develop a statistical testing procedure for interactive data exploration that controls the family-wise error rate on both levels. The procedure enables the user to determine the compatibility of their assumptions about the data with visually observed patterns. We present use-cases where we visualize and evaluate patterns in real-world data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A fundamental problem in visual data exploration concerns whether observed patterns are true or merely random noise. This problem is especially pertinent in visual analytics, where the user is presented with a barrage of patterns, without any guarantees of their statistical validity. Recently this problem has been formulated in terms of statistical testing and the multiple comparisons problem. In this paper, we identify two levels of multiple comparisons problems in visualization: the within-view and the between-view problem. We develop a statistical testing procedure for interactive data exploration that controls the family-wise error rate on both levels. The procedure enables the user to determine the compatibility of their assumptions about the data with visually observed patterns. We present use-cases where we visualize and evaluate patterns in real-world data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A fundamental problem in visual data exploration concerns whether observed patterns are true or merely random noise. This problem is especially pertinent in visual analytics, where the user is presented with a barrage of patterns, without any guarantees of their statistical validity. Recently this problem has been formulated in terms of statistical testing and the multiple comparisons problem. In this paper, we identify two levels of multiple comparisons problems in visualization: the within-view and the between-view problem. We develop a statistical testing procedure for interactive data exploration that controls the family-wise error rate on both levels. The procedure enables the user to determine the compatibility of their assumptions about the data with visually observed patterns. We present use-cases where we visualize and evaluate patterns in real-world data.",
"title": "Visual Data Exploration as a Statistical Testing Procedure: Within-view and Between-view Multiple Comparisons",
"normalizedTitle": "Visual Data Exploration as a Statistical Testing Procedure: Within-view and Between-view Multiple Comparisons",
"fno": "09779102",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Statistical Analysis",
"Visual Analytics",
"Error Analysis",
"Task Analysis",
"Standards",
"Licenses",
"Interactive Data Exploration And Discovery",
"Information Visualization",
"Statistical Testing"
],
"authors": [
{
"givenName": "Rafael",
"surname": "Savvides",
"fullName": "Rafael Savvides",
"affiliation": "Computer Science, University of Helsinki, 3835 Helsinki, Uusimaa, Finland, 00560",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Henelius",
"fullName": "Andreas Henelius",
"affiliation": "Computer Science, University of Helsinki, 3835 Helsinki, Uusimaa, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Emilia",
"surname": "Oikarinen",
"fullName": "Emilia Oikarinen",
"affiliation": "Computer Science, University of Helsinki, 3835 Helsinki, Uusimaa, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai",
"surname": "Puolamäki",
"fullName": "Kai Puolamäki",
"affiliation": "Department of Computer Science, Helsingin Yliopisto, 3835 University of Helsinki, Helsinki, Finland, 00014",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400491",
"title": "A correlative analysis process in a visual analytics environment",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400491/12OmNAkEU1K",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2012/4654/0/4654a848",
"title": "An Exploratory Study of the Users' Behavior on Social Network Sites",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2012/4654a848/12OmNy1SFLB",
"parentPublication": {
"id": "proceedings/itng/2012/4654/0",
"title": "Information Technology: New Generations, Third International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1363",
"title": "High-Dimensional Visual Analytics: Interactive Exploration Guided by Pairwise Views of Point Distributions",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1363/13rRUx0xPIs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122899",
"title": "A Visual Analytics Approach to Multiscale Exploration of Environmental Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122899/13rRUxDqS8g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013121972",
"title": "Decision Exploration Lab: A Visual Analytics Solution for Decision Management",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013121972/13rRUxYINfa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876014",
"title": "Visual Exploration of Sparse Traffic Trajectory Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876014/13rRUxjQyvk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585620",
"title": "ClockPetals: Interactive Sequential Analysis of Traffic Patterns VAST Challenge MC1 Award: Multi-Challenge Award for Aesthetic Design",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585620/17D45WIXbRE",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903295",
"title": "LargeNetVis: Visual Exploration of Large Temporal Networks Based on Community Taxonomies",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903295/1GZokLgYdTW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09246250",
"title": "Co-Bridges: Pair-wise Visual Connection and Comparison for Multi-item Data Streams",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09246250/1olE35lxD8c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09382844",
"title": "Interactive Visual Exploration of Longitudinal Historical Career Mobility Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09382844/1saZr0JHX5C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09774005",
"articleId": "1DjDpHtWZfa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09779066",
"articleId": "1DvgD0GMunm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DwUBOf7vMI",
"name": "ttg555501-09779102s1-supp1-3175532.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09779102s1-supp1-3175532.pdf",
"extension": "pdf",
"size": "411 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DjDpvFkiwE",
"doi": "10.1109/TVCG.2022.3174769",
"abstract": "The Morse-Smale complex is a well studied topological structure that represents the gradient flow behavior between critical points of a scalar function. It supports multi-scale topological analysis and visualization of feature-rich scientific data. Several parallel algorithms have been proposed towards the fast computation of the 3D Morse-Smale complex. Its computation continues to pose significant algorithmic challenges. In particular, the non-trivial structure of the connections between the saddle critical points are not amenable to parallel computation. This paper describes a fine grained parallel algorithm for computing the Morse-Smale complex and a GPU implementation (gMSC). The algorithm first determines the saddle-saddle reachability via a transformation into a sequence of vector operations, and next computes the paths between saddles by transforming it into a sequence of matrix operations. Computational experiments show that the method achieves up to 8.6x speedup over pyms3d and 6x speedup over TTK, the current shared memory implementations. The paper also presents a comprehensive experimental analysis of different steps of the algorithm and reports on their contribution towards runtime performance. Finally, it introduces a CPU based data parallel algorithm for simplifying the Morse-Smale complex via iterative critical point pair cancellation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Morse-Smale complex is a well studied topological structure that represents the gradient flow behavior between critical points of a scalar function. It supports multi-scale topological analysis and visualization of feature-rich scientific data. Several parallel algorithms have been proposed towards the fast computation of the 3D Morse-Smale complex. Its computation continues to pose significant algorithmic challenges. In particular, the non-trivial structure of the connections between the saddle critical points are not amenable to parallel computation. This paper describes a fine grained parallel algorithm for computing the Morse-Smale complex and a GPU implementation (gMSC). The algorithm first determines the saddle-saddle reachability via a transformation into a sequence of vector operations, and next computes the paths between saddles by transforming it into a sequence of matrix operations. Computational experiments show that the method achieves up to 8.6x speedup over pyms3d and 6x speedup over TTK, the current shared memory implementations. The paper also presents a comprehensive experimental analysis of different steps of the algorithm and reports on their contribution towards runtime performance. Finally, it introduces a CPU based data parallel algorithm for simplifying the Morse-Smale complex via iterative critical point pair cancellation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Morse-Smale complex is a well studied topological structure that represents the gradient flow behavior between critical points of a scalar function. It supports multi-scale topological analysis and visualization of feature-rich scientific data. Several parallel algorithms have been proposed towards the fast computation of the 3D Morse-Smale complex. Its computation continues to pose significant algorithmic challenges. In particular, the non-trivial structure of the connections between the saddle critical points are not amenable to parallel computation. This paper describes a fine grained parallel algorithm for computing the Morse-Smale complex and a GPU implementation (gMSC). The algorithm first determines the saddle-saddle reachability via a transformation into a sequence of vector operations, and next computes the paths between saddles by transforming it into a sequence of matrix operations. Computational experiments show that the method achieves up to 8.6x speedup over pyms3d and 6x speedup over TTK, the current shared memory implementations. The paper also presents a comprehensive experimental analysis of different steps of the algorithm and reports on their contribution towards runtime performance. Finally, it introduces a CPU based data parallel algorithm for simplifying the Morse-Smale complex via iterative critical point pair cancellation.",
"title": "A GPU Parallel Algorithm for Computing Morse-Smale Complexes",
"normalizedTitle": "A GPU Parallel Algorithm for Computing Morse-Smale Complexes",
"fno": "09773959",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Parallel Algorithms",
"Graphics Processing Units",
"Three Dimensional Displays",
"Runtime",
"Manifolds",
"Indexes",
"Iterative Algorithms",
"Scalar Field",
"Morse Smale Complex",
"Shared Memory Parallel Algorithm",
"GPU"
],
"authors": [
{
"givenName": "Varshini",
"surname": "Subhash",
"fullName": "Varshini Subhash",
"affiliation": "Computer Science and Automation, Indian Institute of Science, 29120 Bangalore, Karnataka, India, 560012",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Karran",
"surname": "Pandey",
"fullName": "Karran Pandey",
"affiliation": "Computer Science and Automation, Indian Institute of Science, 29120 Bangalore, Karnataka, India",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vijay",
"surname": "Natarajan",
"fullName": "Vijay Natarajan",
"affiliation": "Computer Science and Automation, Indian Institute of Science, Bangalore, Karnataka, India, 560012",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdps/2012/4675/0/4675a484",
"title": "The Parallel Computation of Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2012/4675a484/12OmNrFBQ1B",
"parentPublication": {
"id": "proceedings/ipdps/2012/4675/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/2007/2877/0/28770337",
"title": "Multi-resolution Morse-Smale Complexes for Terrain Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2007/28770337/12OmNsbY6Tz",
"parentPublication": {
"id": "proceedings/iciap/2007/2877/0",
"title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2005/2379/0/01563237",
"title": "Volumetric data analysis using Morse-Smale complexes",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2005/01563237/12OmNxaw5b1",
"parentPublication": {
"id": "proceedings/smi/2005/2379/0",
"title": "Proceedings. International Conference on Shape Modeling and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/04376172",
"title": "Efficient Computation of Morse-Smale Complexes for Three-dimensional Scalar Functions",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/04376172/13rRUwj7cp4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875918",
"title": "Conforming Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875918/13rRUwjGoLG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101757",
"title": "Parallel Computation of 2D Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101757/13rRUxASuSL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122014",
"title": "Computing Morse-Smale Complexes with Accurate Geometry",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122014/13rRUxZzAhE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06065731",
"title": "Direct Feature Visualization Using Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06065731/13rRUyv53Fn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440824",
"title": "Shared-Memory Parallel Computation of Morse-Smale Complexes with Improved Accuracy",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440824/17D45Xh13tG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a036",
"title": "GPU Parallel Computation of Morse-Smale Complexes",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a036/1qRO66SHgwU",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09773967",
"articleId": "1DjDoKqOJz2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09774005",
"articleId": "1DjDpHtWZfa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DligVPxqDu",
"name": "ttg555501-09773959s1-supp1-3174769.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09773959s1-supp1-3174769.pdf",
"extension": "pdf",
"size": "977 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DjDoKqOJz2",
"doi": "10.1109/TVCG.2022.3174805",
"abstract": "The recent proliferation of immersive technology has led to the rapid adoption of consumer-ready hardware for Augmented Reality (AR) and Virtual Reality (VR). While this increase has resulted in a variety of platforms that can offer a richer interactive experience, the advances in technology bring more variability in display types, interaction sensors and use cases. This provides a spectrum of device-specific interaction possibilities, with each offering a tailor-made solution for delivering immersive experiences to users, but often with an inherent lack of standardisation across devices and applications. To address this, a systematic review and an evaluation of explicit, task-based interaction methods in immersive environments are presented in this paper. A corpus of papers published between 2013 and 2020 is reviewed to thoroughly explore state-of-the-art user studies, which investigate input methods and their implementation for immersive interaction tasks (pointing, selection, translation, rotation, scale, viewport, menu-based and abstract). Focus is given to how input methods have been applied within the spectrum of immersive technology (AR, VR, XR). This is achieved by categorising findings based on display type, input method, study type, use case and task. Results illustrate key trends surrounding the benefits and limitations of each interaction technique and highlight the gaps in current research. The review provides a foundation for understanding the current and future directions for interaction studies in immersive environments, which, at this pivotal point in XR technology adoption, provides routes forward for achieving more valuable, intuitive and natural interactive experiences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The recent proliferation of immersive technology has led to the rapid adoption of consumer-ready hardware for Augmented Reality (AR) and Virtual Reality (VR). While this increase has resulted in a variety of platforms that can offer a richer interactive experience, the advances in technology bring more variability in display types, interaction sensors and use cases. This provides a spectrum of device-specific interaction possibilities, with each offering a tailor-made solution for delivering immersive experiences to users, but often with an inherent lack of standardisation across devices and applications. To address this, a systematic review and an evaluation of explicit, task-based interaction methods in immersive environments are presented in this paper. A corpus of papers published between 2013 and 2020 is reviewed to thoroughly explore state-of-the-art user studies, which investigate input methods and their implementation for immersive interaction tasks (pointing, selection, translation, rotation, scale, viewport, menu-based and abstract). Focus is given to how input methods have been applied within the spectrum of immersive technology (AR, VR, XR). This is achieved by categorising findings based on display type, input method, study type, use case and task. Results illustrate key trends surrounding the benefits and limitations of each interaction technique and highlight the gaps in current research. The review provides a foundation for understanding the current and future directions for interaction studies in immersive environments, which, at this pivotal point in XR technology adoption, provides routes forward for achieving more valuable, intuitive and natural interactive experiences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The recent proliferation of immersive technology has led to the rapid adoption of consumer-ready hardware for Augmented Reality (AR) and Virtual Reality (VR). While this increase has resulted in a variety of platforms that can offer a richer interactive experience, the advances in technology bring more variability in display types, interaction sensors and use cases. This provides a spectrum of device-specific interaction possibilities, with each offering a tailor-made solution for delivering immersive experiences to users, but often with an inherent lack of standardisation across devices and applications. To address this, a systematic review and an evaluation of explicit, task-based interaction methods in immersive environments are presented in this paper. A corpus of papers published between 2013 and 2020 is reviewed to thoroughly explore state-of-the-art user studies, which investigate input methods and their implementation for immersive interaction tasks (pointing, selection, translation, rotation, scale, viewport, menu-based and abstract). Focus is given to how input methods have been applied within the spectrum of immersive technology (AR, VR, XR). This is achieved by categorising findings based on display type, input method, study type, use case and task. Results illustrate key trends surrounding the benefits and limitations of each interaction technique and highlight the gaps in current research. The review provides a foundation for understanding the current and future directions for interaction studies in immersive environments, which, at this pivotal point in XR technology adoption, provides routes forward for achieving more valuable, intuitive and natural interactive experiences.",
"title": "A Review of Interaction Techniques for Immersive Environments",
"normalizedTitle": "A Review of Interaction Techniques for Immersive Environments",
"fno": "09773967",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"X Reality",
"Hardware",
"Three Dimensional Displays",
"Usability",
"Databases",
"Testing",
"Augmented Reality",
"Virtual Reality",
"HCI",
"Interaction",
"Input",
"Tasks",
"Usability",
"Multimodal",
"Immersive Topic Search Terms Location Study Type Technology Elicit",
"Compar",
"Virtual",
"Augmented",
"Mixed",
"VR",
"AR",
"MR",
"Immersive Title Display Input Mobile",
"HMD",
"HWD",
"Head Mounted",
"Head Worn",
"Tablet",
"Smart Phone",
"Interact",
"Input",
"Technique Abstract Interaction Method",
"Intuitive",
"Natural",
"Modality",
"Multimodal",
"Ambigu Abstract Modality Speech",
"Voice",
"Head",
"Hand",
"Gesture Abstract Tasks Point",
"Select",
"Manipulat",
"Mov",
"Translat",
"Position",
"Rotat",
"Scal",
"Menu"
],
"authors": [
{
"givenName": "Becky",
"surname": "Spittle",
"fullName": "Becky Spittle",
"affiliation": "DMT lab, Birmingham City University, 1725 Birmingham, West Midlands, United Kingdom of Great Britain and Northern Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maite",
"surname": "Frutos-Pascual",
"fullName": "Maite Frutos-Pascual",
"affiliation": "DMT Lab, Birmingham City University, 1725 Birmingham, West Midlands, United Kingdom of Great Britain and Northern Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "Creed",
"fullName": "Chris Creed",
"affiliation": "DMT Lab, Birmingham City University, 1725 Birmingham, West Midlands, United Kingdom of Great Britain and Northern Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ian",
"surname": "Williams",
"fullName": "Ian Williams",
"affiliation": "DMT Lab, Birmingham City University, Birmingham, West Midlands, United Kingdom of Great Britain and Northern Ireland, B47XG",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802108",
"title": "A demonstration of tablet-based interaction panels for immersive environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802108/12OmNwGZNNk",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc-companion/2018/0359/0/035900a353",
"title": "A Review of Applications of Extended Reality in the Construction Domain",
"doi": null,
"abstractUrl": "/proceedings-article/ucc-companion/2018/035900a353/17D45WB0qbv",
"parentPublication": {
"id": "proceedings/ucc-companion/2018/0359/0",
"title": "2018 IEEE/ACM International Conference on Utility and Cloud Computing Companion (UCC Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a204",
"title": "Comparative Reality: Measuring User Experience and Emotion in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a204/17D45Xi9rXe",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a972",
"title": "Aroaro - A Tool for Distributed Immersive Mixed Reality Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a972/1CJefXNbhYs",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a085",
"title": "MEinVR: Multimodal Interaction Paradigms in Immersive Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a085/1J7W98ABKwM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a710",
"title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a710/1J7WgdWP768",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a258",
"title": "Layouts of 3D Data Visualizations Small Multiples around Users in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a258/1J7WxzHZHry",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090436",
"title": "NIDIT: Workshop on Novel Input Devices and Interaction Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090436/1jIxj9oCqpq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a431",
"title": "A Taxonomy of Interaction Techniques for Immersive Augmented Reality based on an Iterative Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a431/1yeD62B4zza",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a439",
"title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a439/1yeQPu8aFlm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09772276",
"articleId": "1DgjDz35pfi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09773959",
"articleId": "1DjDpvFkiwE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DjDpHtWZfa",
"doi": "10.1109/TVCG.2022.3174656",
"abstract": "Occluding effects have been frequently used to present weather conditions and environments in cartoon animations, such as raining, snowing, moving leaves, and moving petals. While these effects greatly enrich the visual appeal of the cartoon animations, they may also cause undesired occlusions on the content area, which significantly complicate the analysis and processing of the cartoon animations. In this paper, we make the first attempt to separate the occluding effects and content for cartoon animations. The major challenge of this problem is that, unlike natural effects that are realistic and small-sized, the effects of cartoons are usually stylistic and large-sized. Besides, effects in cartoons are manually drawn, so their motions are more unpredictable than realistic effects. To separate occluding effects and content for cartoon animations, we propose to leverage the difference in the motion patterns of the effects and the content, and capture the locations of the effects based on a multi-scale flow-based effect prediction (MFEP) module. A dual-task learning system is designed to extract the effect video and reconstruct the effect-removed content video at the same time. We apply our method on a large number of cartoon videos of different content and effects. Experiments show that our method significantly outperforms the existing methods. We further demonstrate how the separated effects and content facilitate the analysis and processing of cartoon videos through different applications, including segmentation, inpainting, and effect migration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occluding effects have been frequently used to present weather conditions and environments in cartoon animations, such as raining, snowing, moving leaves, and moving petals. While these effects greatly enrich the visual appeal of the cartoon animations, they may also cause undesired occlusions on the content area, which significantly complicate the analysis and processing of the cartoon animations. In this paper, we make the first attempt to separate the occluding effects and content for cartoon animations. The major challenge of this problem is that, unlike natural effects that are realistic and small-sized, the effects of cartoons are usually stylistic and large-sized. Besides, effects in cartoons are manually drawn, so their motions are more unpredictable than realistic effects. To separate occluding effects and content for cartoon animations, we propose to leverage the difference in the motion patterns of the effects and the content, and capture the locations of the effects based on a multi-scale flow-based effect prediction (MFEP) module. A dual-task learning system is designed to extract the effect video and reconstruct the effect-removed content video at the same time. We apply our method on a large number of cartoon videos of different content and effects. Experiments show that our method significantly outperforms the existing methods. We further demonstrate how the separated effects and content facilitate the analysis and processing of cartoon videos through different applications, including segmentation, inpainting, and effect migration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occluding effects have been frequently used to present weather conditions and environments in cartoon animations, such as raining, snowing, moving leaves, and moving petals. While these effects greatly enrich the visual appeal of the cartoon animations, they may also cause undesired occlusions on the content area, which significantly complicate the analysis and processing of the cartoon animations. In this paper, we make the first attempt to separate the occluding effects and content for cartoon animations. The major challenge of this problem is that, unlike natural effects that are realistic and small-sized, the effects of cartoons are usually stylistic and large-sized. Besides, effects in cartoons are manually drawn, so their motions are more unpredictable than realistic effects. To separate occluding effects and content for cartoon animations, we propose to leverage the difference in the motion patterns of the effects and the content, and capture the locations of the effects based on a multi-scale flow-based effect prediction (MFEP) module. A dual-task learning system is designed to extract the effect video and reconstruct the effect-removed content video at the same time. We apply our method on a large number of cartoon videos of different content and effects. Experiments show that our method significantly outperforms the existing methods. We further demonstrate how the separated effects and content facilitate the analysis and processing of cartoon videos through different applications, including segmentation, inpainting, and effect migration.",
"title": "Multi-scale Flow-based Occluding Effect and Content Separation for Cartoon Animations",
"normalizedTitle": "Multi-scale Flow-based Occluding Effect and Content Separation for Cartoon Animations",
"fno": "09774005",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Image Restoration",
"Rain",
"Adaptive Optics",
"Visualization",
"Optical Imaging",
"Deep Learning",
"Cartoon Effect Content Separation",
"Cartoon Effect Removal",
"Optical Flow"
],
"authors": [
{
"givenName": "Cheng",
"surname": "Xu",
"fullName": "Cheng Xu",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, 26467 guangzhou, Guangdong, China, 510006",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Qu",
"fullName": "Wei Qu",
"affiliation": "Computer Science and engineering, South China University of Technology, 26467 Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xuemiao",
"surname": "Xu",
"fullName": "Xuemiao Xu",
"affiliation": "Computer Science and Engineering, South China University of Technology, 26467 Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xueting",
"surname": "Liu",
"fullName": "Xueting Liu",
"affiliation": "Computer Science and engineering, Caritas Institute of Higher Education, 66391 Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacific-graphics/2010/4205/0/4205a001",
"title": "Procedural Modeling of Water Caustics and Foamy Water for Cartoon Animation",
"doi": null,
"abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a001/12OmNCf1Dqs",
"parentPublication": {
"id": "proceedings/pacific-graphics/2010/4205/0",
"title": "Pacific Conference on Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sera/2007/2867/0/28670924",
"title": "An Efficient Expression on Cartoon Rendering Scheme in Game Characters",
"doi": null,
"abstractUrl": "/proceedings-article/sera/2007/28670924/12OmNqBKTX2",
"parentPublication": {
"id": "proceedings/sera/2007/2867/0",
"title": "5th ACIS International Conference on Software Engineering Research, Management & Applications (SERA 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicn/2014/6929/0/6929a697",
"title": "Design of an Automatic Cartoon Movie Builder (ACMB) System for Generation of a Cartoon Movie from a Given Story",
"doi": null,
"abstractUrl": "/proceedings-article/cicn/2014/6929a697/12OmNvjyxQU",
"parentPublication": {
"id": "proceedings/cicn/2014/6929/0",
"title": "2014 International Conference on Computational Intelligence and Communication Networks (CICN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444656",
"title": "Expressive haptic rendering with cartoon-inspired effects",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444656/12OmNyNQSNU",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/04/ttg2009040618",
"title": "Vectorizing Cartoon Animations",
"doi": null,
"abstractUrl": "/journal/tg/2009/04/ttg2009040618/13rRUIJcWlh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06910280",
"title": "2.5D Cartoon Hair Modeling and Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06910280/13rRUIJuxpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/ttg2012071156",
"title": "EXCOL: An EXtract-and-COmplete Layering Approach to Cartoon Animation Reusing",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/ttg2012071156/13rRUwgQpDt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/05/v0540",
"title": "Stroke Surfaces: Temporally Coherent Artistic Animations from Video",
"doi": null,
"abstractUrl": "/journal/tg/2005/05/v0540/13rRUy0HYRf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2010/02/tlt2010020139",
"title": "Layered Architecture for Automatic Generation of Conflictive Animations in Programming Education",
"doi": null,
"abstractUrl": "/journal/lt/2010/02/tlt2010020139/13rRUynpT9H",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093346",
"title": "Neural Puppet: Generative Layered Cartoon Characters",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093346/1jPbkInDnXy",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09773959",
"articleId": "1DjDpvFkiwE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09779102",
"articleId": "1DvgCK2YAyQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Dqhdqm9K6Y",
"name": "ttg555501-09774005s1-supp2-3174656.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09774005s1-supp2-3174656.mp4",
"extension": "mp4",
"size": "30.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1DqhdCWt2Gk",
"name": "ttg555501-09774005s1-supp1-3174656.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09774005s1-supp1-3174656.pdf",
"extension": "pdf",
"size": "2.05 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DgjDz35pfi",
"doi": "10.1109/TVCG.2022.3173889",
"abstract": "Information uncertainty is ubiquitous in everyday life, including in domains as diverse as weather forecasts, investments, and health risks. Knowing how to interpret and integrate this uncertain information is vital for making good decisions, but this can be difficult for experts and novices alike. In this study, we examine whether brief, focused practice can improve peoples ability to understand and integrate bivariate Gaussian uncertainty visualized via ensemble displays, summary displays, and distributional displays, and we examine whether this is influenced by the complexity of the displayed information. In two experiments (N=118 and 56), decision making was faster and more accurate after practice relative to before practice. Furthermore, the performance improvements transferred to use of display types that were not practiced. This suggests that practice with feedback may improve underlying skills in probabilistic reasoning and provides a promising approach to improve peoples decision making under uncertainty.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Information uncertainty is ubiquitous in everyday life, including in domains as diverse as weather forecasts, investments, and health risks. Knowing how to interpret and integrate this uncertain information is vital for making good decisions, but this can be difficult for experts and novices alike. In this study, we examine whether brief, focused practice can improve peoples ability to understand and integrate bivariate Gaussian uncertainty visualized via ensemble displays, summary displays, and distributional displays, and we examine whether this is influenced by the complexity of the displayed information. In two experiments (N=118 and 56), decision making was faster and more accurate after practice relative to before practice. Furthermore, the performance improvements transferred to use of display types that were not practiced. This suggests that practice with feedback may improve underlying skills in probabilistic reasoning and provides a promising approach to improve peoples decision making under uncertainty.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Information uncertainty is ubiquitous in everyday life, including in domains as diverse as weather forecasts, investments, and health risks. Knowing how to interpret and integrate this uncertain information is vital for making good decisions, but this can be difficult for experts and novices alike. In this study, we examine whether brief, focused practice can improve peoples ability to understand and integrate bivariate Gaussian uncertainty visualized via ensemble displays, summary displays, and distributional displays, and we examine whether this is influenced by the complexity of the displayed information. In two experiments (N=118 and 56), decision making was faster and more accurate after practice relative to before practice. Furthermore, the performance improvements transferred to use of display types that were not practiced. This suggests that practice with feedback may improve underlying skills in probabilistic reasoning and provides a promising approach to improve peoples decision making under uncertainty.",
"title": "Practice improves performance of a 2D uncertainty integration task within and across visualizations",
"normalizedTitle": "Practice improves performance of a 2D uncertainty integration task within and across visualizations",
"fno": "09772276",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Uncertainty",
"Data Visualization",
"Task Analysis",
"Image Color Analysis",
"Training",
"Cognition",
"Government",
"Decision Making",
"Training",
"Visualization",
"Uncertainty"
],
"authors": [
{
"givenName": "Sarah A",
"surname": "Kusumastuti",
"fullName": "Sarah A Kusumastuti",
"affiliation": "Psychology, University of Southern California, 5116 Los Angeles, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kimberly A",
"surname": "Pollard",
"fullName": "Kimberly A Pollard",
"affiliation": "Human Research and Engineering Directorate, DEVCOM Army Research Laboratory, Los Angeles, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ashley H",
"surname": "Oiknine",
"fullName": "Ashley H Oiknine",
"affiliation": "Research and Engineering, DCS Corporation, 218964 Alexandria, Virginia, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bianca",
"surname": "Dalangin",
"fullName": "Bianca Dalangin",
"affiliation": "Research and Engineering, DCS Corporation, 218964 Alexandria, Virginia, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tiffany R",
"surname": "Raber",
"fullName": "Tiffany R Raber",
"affiliation": "Human Research and Engineering Directorate, DEVCOM Army Research Laboratory, Los Angeles, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Benjamin Taylor",
"surname": "Files",
"fullName": "Benjamin Taylor Files",
"affiliation": "Human Research and Engineering Directorate, DEVCOM Army Research Laboratory, Los Angeles, California, United States, 90094",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2002/1751/0/17510037",
"title": "Visualizing Data with Bounded Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2002/17510037/12OmNrFkeWk",
"parentPublication": {
"id": "proceedings/ieee-infovis/2002/1751/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386365",
"title": "Uncertainty management in remanufacturing: A review",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386365/12OmNwD1pWU",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mise/2015/7055/0/7055a007",
"title": "Modularity for Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/mise/2015/7055a007/12OmNxG1yWW",
"parentPublication": {
"id": "proceedings/mise/2015/7055/0",
"title": "2015 IEEE/ACM 7th International Workshop on Modeling in Software Engineering (MiSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2014/3031/0/06912245",
"title": "Supporting early decision-making in the presence of uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/re/2014/06912245/12OmNzICEVd",
"parentPublication": {
"id": "proceedings/re/2014/3031/0",
"title": "2014 IEEE 22nd International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017624",
"title": "Imagining Replications: Graphical Prediction & Discrete Visualizations Improve Recall & Estimation of Effect Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017624/13rRUIM2VH5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2015/03/07058341",
"title": "Uncertainty Representation in Visualizations of Learning Analytics for Learners: Current Approaches and Opportunities",
"doi": null,
"abstractUrl": "/journal/lt/2015/03/07058341/13rRUygT7pf",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08457476",
"title": "In Pursuit of Error: A Survey of Uncertainty Visualization Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08457476/17D45WaTkcP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09217952",
"title": "A Bayesian cognition approach for belief updating of correlation judgement through uncertainty visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09217952/1nL7qhcUKPe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413010",
"title": "On-manifold Adversarial Data Augmentation Improves Uncertainty Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413010/1tmj0SEORmE",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09548797",
"title": "Effect of uncertainty visualizations on myopic loss aversion and the equity premium puzzle in retirement investment decisions",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09548797/1xeSlZqOf8A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09772329",
"articleId": "1DgjDn5nymI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09773967",
"articleId": "1DjDoKqOJz2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DgjDn5nymI",
"doi": "10.1109/TVCG.2022.3173921",
"abstract": "Virtual reality has long been utilized in the games industry and is emergent for pilot training in the military and commercial airline sectors. Its usefulness as a mechanism of skills transfer to the real world has not been well researched or considered. This paper follows the PRISMA methodology to present a systematic quantitative literature review (SQLR) on the use of extended reality in flight simulators. It also encompasses recent studies of teaching and learning in immersive, virtual environments in non-aviation disciplines. The review identified 39 papers spanning all areas of the virtuality continuum across academic, commercial, and military aviation sectors, as well as engineering and medicine. The SQLR found that extended reality in flight simulators is being introduced in the commercial and military aviation sectors. However, within academia, hardware constraints have hindered the ability to provide positive empirical evidence of simulator effectiveness. While virtual reality may not replace traditional flight simulators in the near future, the technology is available to supplement classroom training activities and some aspects of simulator procedure training with promising cognitive learning outcomes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality has long been utilized in the games industry and is emergent for pilot training in the military and commercial airline sectors. Its usefulness as a mechanism of skills transfer to the real world has not been well researched or considered. This paper follows the PRISMA methodology to present a systematic quantitative literature review (SQLR) on the use of extended reality in flight simulators. It also encompasses recent studies of teaching and learning in immersive, virtual environments in non-aviation disciplines. The review identified 39 papers spanning all areas of the virtuality continuum across academic, commercial, and military aviation sectors, as well as engineering and medicine. The SQLR found that extended reality in flight simulators is being introduced in the commercial and military aviation sectors. However, within academia, hardware constraints have hindered the ability to provide positive empirical evidence of simulator effectiveness. While virtual reality may not replace traditional flight simulators in the near future, the technology is available to supplement classroom training activities and some aspects of simulator procedure training with promising cognitive learning outcomes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality has long been utilized in the games industry and is emergent for pilot training in the military and commercial airline sectors. Its usefulness as a mechanism of skills transfer to the real world has not been well researched or considered. This paper follows the PRISMA methodology to present a systematic quantitative literature review (SQLR) on the use of extended reality in flight simulators. It also encompasses recent studies of teaching and learning in immersive, virtual environments in non-aviation disciplines. The review identified 39 papers spanning all areas of the virtuality continuum across academic, commercial, and military aviation sectors, as well as engineering and medicine. The SQLR found that extended reality in flight simulators is being introduced in the commercial and military aviation sectors. However, within academia, hardware constraints have hindered the ability to provide positive empirical evidence of simulator effectiveness. While virtual reality may not replace traditional flight simulators in the near future, the technology is available to supplement classroom training activities and some aspects of simulator procedure training with promising cognitive learning outcomes.",
"title": "Using Extended Reality in Flight Simulators: A Literature Review",
"normalizedTitle": "Using Extended Reality in Flight Simulators: A Literature Review",
"fno": "09772329",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"X Reality",
"Training",
"Costs",
"Virtual Environments",
"Solid Modeling",
"Extended Reality",
"Industries",
"Aerospace",
"Artificial",
"Augmented",
"Extended And Virtual Realities",
"Simulation",
"Training"
],
"authors": [
{
"givenName": "Jamie Ian",
"surname": "Cross",
"fullName": "Jamie Ian Cross",
"affiliation": "Aviation, Griffith University, 5723 Brisbane, Queensland, Australia, 4111",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christine",
"surname": "Boag-Hodgson",
"fullName": "Christine Boag-Hodgson",
"affiliation": "Aviation, Griffith University, 5723 Brisbane, Queensland, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Ryley",
"fullName": "Tim Ryley",
"affiliation": "Aviation, Griffith University, 5723 Brisbane, Queensland, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timothy",
"surname": "Mavin",
"fullName": "Timothy Mavin",
"affiliation": "Education, Griffith University, 5723 Brisbane, Queensland, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leigh Ellen",
"surname": "Potter",
"fullName": "Leigh Ellen Potter",
"affiliation": "Aviation, Griffith University, 5723 Brisbane, Queensland, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/anss/2005/2322/0/23220168",
"title": "A Neural Approach for Fast Simulation of Flight Mechanics",
"doi": null,
"abstractUrl": "/proceedings-article/anss/2005/23220168/12OmNB1wkOQ",
"parentPublication": {
"id": "proceedings/anss/2005/2322/0",
"title": "Proceedings. 38th Annual Simulation Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1988/01/mcg1988010019",
"title": "Flight Simulators for Under $100000",
"doi": null,
"abstractUrl": "/magazine/cg/1988/01/mcg1988010019/13rRUwhpBPV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc-companion/2018/0359/0/035900a353",
"title": "A Review of Applications of Extended Reality in the Construction Domain",
"doi": null,
"abstractUrl": "/proceedings-article/ucc-companion/2018/035900a353/17D45WB0qbv",
"parentPublication": {
"id": "proceedings/ucc-companion/2018/0359/0",
"title": "2018 IEEE/ACM International Conference on Utility and Cloud Computing Companion (UCC Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2021/3784/0/378400a572",
"title": "Topic Trends in Issue Tracking System of Extended Reality Frameworks",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2021/378400a572/1B4m7XRlTcQ",
"parentPublication": {
"id": "proceedings/apsec/2021/3784/0",
"title": "2021 28th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a322",
"title": "Extended Reality Training for Business and Education: The New Generation of Learning Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a322/1J7W77jxOlq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a254",
"title": "Generative Research in the Context of Academic Extended Reality Research",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a254/1J7WcCweXhC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a710",
"title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a710/1J7WgdWP768",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2022/9476/0/947600a188",
"title": "An Improved Framework to Assess the Evaluation of Extended Reality Healthcare Simulators using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2022/947600a188/1JjyrslgV0c",
"parentPublication": {
"id": "proceedings/chase/2022/9476/0",
"title": "2022 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a092",
"title": "A Literature Review of User Studies in Extended Reality Applications for Archaeology",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a092/1JrQS43SrFC",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a476",
"title": "Designing Virtual Pedagogical Agents and Mentors for Extended Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a476/1yeQC8OgSoU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09770416",
"articleId": "1D9G4zI0NIQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09772276",
"articleId": "1DgjDz35pfi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D8asOXVetq",
"doi": "10.1109/TVCG.2022.3172560",
"abstract": "In this design study, we present Uncover, an interactive tool aimed at astronomers to find previously unidentified member stars in stellar clusters. We contribute data and task abstraction in the domain of astronomy and provide an approach for the non-trivial challenge of finding a suitable hyper-parameter set for highly flexible novelty detection models. We achieve this by substituting the tedious manual trial and error process, which usually results in finding a small subset of passable models with a five-step workflow approach. We utilize ranges of a priori defined, interpretable summary statistics models have to adhere to. Our goal is to enable astronomers to use their domain expertise to quantify model goodness effectively. We attempt to change the current culture of blindly accepting a machine learning model to one where astronomers build and modify a model based on their expertise. We evaluate the tools' usability and usefulness in a series of interviews with domain experts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this design study, we present Uncover, an interactive tool aimed at astronomers to find previously unidentified member stars in stellar clusters. We contribute data and task abstraction in the domain of astronomy and provide an approach for the non-trivial challenge of finding a suitable hyper-parameter set for highly flexible novelty detection models. We achieve this by substituting the tedious manual trial and error process, which usually results in finding a small subset of passable models with a five-step workflow approach. We utilize ranges of a priori defined, interpretable summary statistics models have to adhere to. Our goal is to enable astronomers to use their domain expertise to quantify model goodness effectively. We attempt to change the current culture of blindly accepting a machine learning model to one where astronomers build and modify a model based on their expertise. We evaluate the tools' usability and usefulness in a series of interviews with domain experts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this design study, we present Uncover, an interactive tool aimed at astronomers to find previously unidentified member stars in stellar clusters. We contribute data and task abstraction in the domain of astronomy and provide an approach for the non-trivial challenge of finding a suitable hyper-parameter set for highly flexible novelty detection models. We achieve this by substituting the tedious manual trial and error process, which usually results in finding a small subset of passable models with a five-step workflow approach. We utilize ranges of a priori defined, interpretable summary statistics models have to adhere to. Our goal is to enable astronomers to use their domain expertise to quantify model goodness effectively. We attempt to change the current culture of blindly accepting a machine learning model to one where astronomers build and modify a model based on their expertise. We evaluate the tools' usability and usefulness in a series of interviews with domain experts.",
"title": "Uncover: Toward Interpretable Models for Detecting New Star Cluster Members",
"normalizedTitle": "Uncover: Toward Interpretable Models for Detecting New Star Cluster Members",
"fno": "09769931",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Stars",
"Predictive Models",
"Anomaly Detection",
"Extraterrestrial Measurements",
"Data Models",
"Computational Modeling",
"Data Science",
"Interpretable Models",
"Model Selection",
"Novelty Detection",
"Star Clusters"
],
"authors": [
{
"givenName": "Sebastian",
"surname": "Ratzenbock",
"fullName": "Sebastian Ratzenbock",
"affiliation": "Research Network Data Science, University of Vienna, 27258 Vienna, Vienna, Austria, 1090",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Verena",
"surname": "Obermuller",
"fullName": "Verena Obermuller",
"affiliation": "Faculty of Computer Science, University of Vienna, 27258 Vienna, Vienna, Austria, 1090",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Torsten",
"surname": "Moller",
"fullName": "Torsten Moller",
"affiliation": "Faculty of Computer Science, University of Vienna, 27258 Wien, Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joao",
"surname": "Alves",
"fullName": "Joao Alves",
"affiliation": "Department of Astrophysics, University of Vienna, 27258 Wien, Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Immanuel",
"surname": "Bomze",
"fullName": "Immanuel Bomze",
"affiliation": "ISOR/VCOR, University of Vienna, 27258 Wien, Wien, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/waina/2016/2461/0/2461a109",
"title": "Towards Data Interoperability: Turning Domain Specific Knowledge to Agnostic across the Data Lifecycle",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2016/2461a109/12OmNAYoKjL",
"parentPublication": {
"id": "proceedings/waina/2016/2461/0",
"title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2016/5473/0/07837984",
"title": "Bayesian Rule Sets for Interpretable Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2016/07837984/12OmNCfjeBN",
"parentPublication": {
"id": "proceedings/icdm/2016/5473/0",
"title": "2016 IEEE 16th International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258215",
"title": "Improving expectation maximization algorithm over stellar data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258215/17D45W1Oa4h",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440842",
"title": "RetainVis: Visual Analytics with Interpretable and Interactive Recurrent Neural Networks on Electronic Medical Records",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440842/17D45XDIXWa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020714",
"title": "Star-Bridge: a topological multidimensional subgraph analysis to detect fraudulent nodes and rings in telecom networks",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020714/1KfRSfulXnG",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a773",
"title": "Toward Unsupervised Outlier Model Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a773/1KpCFhWLcas",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933695",
"title": "TeleGam: Combining Visualization and Verbalization for Interpretable Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933695/1fTgH9WfhVC",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933671",
"title": "GalStamps: Analyzing Real and Simulated Galaxy Observations",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933671/1fTgK0Z7gYw",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377834",
"title": "Toward Interpretable Machine Learning for Understanding Epidemic Data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377834/1s64VpHyaJO",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09468958",
"title": "Interpretable Anomaly Detection in Event Sequences via Sequence Matching and Visual Comparison",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09468958/1uR9IWtyEi4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09768153",
"articleId": "1D6qPjvIP16",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09770381",
"articleId": "1D9G4inD4oE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DligyA8m7S",
"name": "ttg555501-09769931s1-supp1-3172560.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09769931s1-supp1-3172560.mp4",
"extension": "mp4",
"size": "13.6 MB",
"__typename": "WebExtraType"
},
{
"id": "1Dlighx49JC",
"name": "ttg555501-09769931s1-supp2-3172560.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09769931s1-supp2-3172560.pdf",
"extension": "pdf",
"size": "1.65 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D9G4inD4oE",
"doi": "10.1109/TVCG.2022.3173081",
"abstract": "In this paper, we propose a method for the interactive visualization of medium-scale dynamic heightfields without visual artifacts. Our data fall into a category too large to be rendered directly at full resolution, but small enough to fit into GPU memory without pre-filtering and data streaming. We present the real-world use case of unfiltered flood simulation data of such medium scale that need to be visualized in real time for scientific purposes. Our solution facilitates compute shaders to maintain a guaranteed watertight triangulation in GPU memory that approximates the interpolated heightfields with view-dependent, continuous levels of detail. In each frame, the triangulation is updated incrementally by iteratively refining the cached result of the previous frame to minimize the computational effort. In particular, we minimize the number of heightfield sampling operations to make adaptive and higher-order interpolations viable options. We impose no restriction on the number of subdivisions and the achievable level of detail to allow for extreme zoom ranges required in geospatial visualization. Our method provides a stable runtime performance and can be executed with a limited time budget. We present a comparison of our method to three state-of-the-art methods, in which our method is competitive to previous non-watertight methods in terms of runtime, while outperforming them in terms of accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a method for the interactive visualization of medium-scale dynamic heightfields without visual artifacts. Our data fall into a category too large to be rendered directly at full resolution, but small enough to fit into GPU memory without pre-filtering and data streaming. We present the real-world use case of unfiltered flood simulation data of such medium scale that need to be visualized in real time for scientific purposes. Our solution facilitates compute shaders to maintain a guaranteed watertight triangulation in GPU memory that approximates the interpolated heightfields with view-dependent, continuous levels of detail. In each frame, the triangulation is updated incrementally by iteratively refining the cached result of the previous frame to minimize the computational effort. In particular, we minimize the number of heightfield sampling operations to make adaptive and higher-order interpolations viable options. We impose no restriction on the number of subdivisions and the achievable level of detail to allow for extreme zoom ranges required in geospatial visualization. Our method provides a stable runtime performance and can be executed with a limited time budget. We present a comparison of our method to three state-of-the-art methods, in which our method is competitive to previous non-watertight methods in terms of runtime, while outperforming them in terms of accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a method for the interactive visualization of medium-scale dynamic heightfields without visual artifacts. Our data fall into a category too large to be rendered directly at full resolution, but small enough to fit into GPU memory without pre-filtering and data streaming. We present the real-world use case of unfiltered flood simulation data of such medium scale that need to be visualized in real time for scientific purposes. Our solution facilitates compute shaders to maintain a guaranteed watertight triangulation in GPU memory that approximates the interpolated heightfields with view-dependent, continuous levels of detail. In each frame, the triangulation is updated incrementally by iteratively refining the cached result of the previous frame to minimize the computational effort. In particular, we minimize the number of heightfield sampling operations to make adaptive and higher-order interpolations viable options. We impose no restriction on the number of subdivisions and the achievable level of detail to allow for extreme zoom ranges required in geospatial visualization. Our method provides a stable runtime performance and can be executed with a limited time budget. We present a comparison of our method to three state-of-the-art methods, in which our method is competitive to previous non-watertight methods in terms of runtime, while outperforming them in terms of accuracy.",
"title": "Watertight Incremental Heightfield Tessellation",
"normalizedTitle": "Watertight Incremental Heightfield Tessellation",
"fno": "09770381",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surface Reconstruction",
"Merging",
"Data Visualization",
"Real Time Systems",
"Rendering Computer Graphics",
"Interpolation",
"Graphics Processing Units",
"Visualization Techniques And Methodologies",
"Heightfield Rendering",
"Terrain Rendering",
"Level Of Detail",
"Tessellation"
],
"authors": [
{
"givenName": "Daniel",
"surname": "Cornel",
"fullName": "Daniel Cornel",
"affiliation": "Integrated Simulations Group, VRVis Zentrum fr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Vienna, Austria, 1030",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Silvana",
"surname": "Zechmeister",
"fullName": "Silvana Zechmeister",
"affiliation": "Integrated Simulations Group, VRVis Zentrum fr Virtual Reality und Visualisierung Forschungs-GmbH, Vienna, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eduard",
"surname": "Groeller",
"fullName": "Eduard Groeller",
"affiliation": "Institute of Computer Graphics and Algorithms, TU Wien, 27259 Wien, Wien, Austria, 1040",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jurgen",
"surname": "Waser",
"fullName": "Jurgen Waser",
"affiliation": "Visualization, VRVis Vienna, Vienna, Vienna, Austria, 1220",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2006/0693/0/04061556",
"title": "Incremental Raycasting of Piecewise Quadratic Surfaces on the GPU",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061556/12OmNApcukG",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitcs/2014/6541/0/07021710",
"title": "A Height-Map Based Terrain Rendering with Tessellation Hardware",
"doi": null,
"abstractUrl": "/proceedings-article/icitcs/2014/07021710/12OmNBE7Ms6",
"parentPublication": {
"id": "proceedings/icitcs/2014/6541/0",
"title": "2014 International Conference on IT Convergence and Security (ICITCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a126",
"title": "Efficient HPR-Based Rendering of Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a126/12OmNzIUfWI",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a343",
"title": "Image-Based Streamsurfaces",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a343/12OmNzWx06V",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101732",
"title": "Octree Rasterization: Accelerating High-Quality Out-of-Core GPU Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101732/13rRUwvBy8T",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/05/ttg2011050669",
"title": "Data-Parallel Octrees for Surface Reconstruction",
"doi": null,
"abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/03/ttg2011030345",
"title": "GPU-Assisted Computation of Centroidal Voronoi Tessellation",
"doi": null,
"abstractUrl": "/journal/tg/2011/03/ttg2011030345/13rRUyoPSP2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2819",
"title": "GIFS: Neural Implicit Function for General Shape Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2819/1H0KHzfm1Ta",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a160",
"title": "Hybrid Polygon-Point Rendering of Singular and Non-Manifold Implicit Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a160/1cMEQnNfRXG",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2020/8771/0/09122336",
"title": "Improving GPU Real-Time Wide Terrain Tessellation Using the New Mesh Shader Pipeline",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2020/09122336/1kRSelKDDk4",
"parentPublication": {
"id": "proceedings/nicoint/2020/8771/0",
"title": "2020 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09769931",
"articleId": "1D8asOXVetq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09770416",
"articleId": "1D9G4zI0NIQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DjDp9VSs4o",
"name": "ttg555501-09770381s1-tvcg-3173081-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09770381s1-tvcg-3173081-mm.zip",
"extension": "zip",
"size": "8.39 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D9G4zI0NIQ",
"doi": "10.1109/TVCG.2022.3172361",
"abstract": "Reflectional symmetry is a ubiquitous pattern in nature. Previous works usually solve this problem by voting or sampling, suffering from high computational cost and randomness. In this paper, we propose a learning-based approach to intrinsic reflectional symmetry detection. Instead of directly finding symmetric point pairs, we parametrize this self-isometry using a functional map matrix, which can be easily computed given the signs of Laplacian eigenfunctions under the symmetric mapping. Therefore, we manually label the eigenfunction signs for a variety of shapes and train a novel neural network to predict the sign of each eigenfunction under symmetry. Our network aims at learning the global property of functions and consequently converts the problem defined on the manifold to the functional domain. By disentangling the prediction of the matrix into separated bases, our method generalizes well to new shapes and is invariant under perturbation of eigenfunctions. Through extensive experiments, we demonstrate the robustness of our method in challenging cases, including different topology and incomplete shapes with holes. By avoiding random sampling, our learning-based algorithm is over 20 times faster than state-of-the-art methods, and meanwhile, is more robust, achieving higher correspondence accuracy in commonly used metrics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reflectional symmetry is a ubiquitous pattern in nature. Previous works usually solve this problem by voting or sampling, suffering from high computational cost and randomness. In this paper, we propose a learning-based approach to intrinsic reflectional symmetry detection. Instead of directly finding symmetric point pairs, we parametrize this self-isometry using a functional map matrix, which can be easily computed given the signs of Laplacian eigenfunctions under the symmetric mapping. Therefore, we manually label the eigenfunction signs for a variety of shapes and train a novel neural network to predict the sign of each eigenfunction under symmetry. Our network aims at learning the global property of functions and consequently converts the problem defined on the manifold to the functional domain. By disentangling the prediction of the matrix into separated bases, our method generalizes well to new shapes and is invariant under perturbation of eigenfunctions. Through extensive experiments, we demonstrate the robustness of our method in challenging cases, including different topology and incomplete shapes with holes. By avoiding random sampling, our learning-based algorithm is over 20 times faster than state-of-the-art methods, and meanwhile, is more robust, achieving higher correspondence accuracy in commonly used metrics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reflectional symmetry is a ubiquitous pattern in nature. Previous works usually solve this problem by voting or sampling, suffering from high computational cost and randomness. In this paper, we propose a learning-based approach to intrinsic reflectional symmetry detection. Instead of directly finding symmetric point pairs, we parametrize this self-isometry using a functional map matrix, which can be easily computed given the signs of Laplacian eigenfunctions under the symmetric mapping. Therefore, we manually label the eigenfunction signs for a variety of shapes and train a novel neural network to predict the sign of each eigenfunction under symmetry. Our network aims at learning the global property of functions and consequently converts the problem defined on the manifold to the functional domain. By disentangling the prediction of the matrix into separated bases, our method generalizes well to new shapes and is invariant under perturbation of eigenfunctions. Through extensive experiments, we demonstrate the robustness of our method in challenging cases, including different topology and incomplete shapes with holes. By avoiding random sampling, our learning-based algorithm is over 20 times faster than state-of-the-art methods, and meanwhile, is more robust, achieving higher correspondence accuracy in commonly used metrics.",
"title": "Learning-based Intrinsic Reflectional Symmetry Detection",
"normalizedTitle": "Learning-based Intrinsic Reflectional Symmetry Detection",
"fno": "09770416",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Eigenvalues And Eigenfunctions",
"Shape",
"Manifolds",
"Neural Networks",
"Symmetric Matrices",
"Matrix Converters",
"Laplace Equations",
"Mesh Processing",
"Symmetry Detection",
"Deep Learning",
"Intrinsic Reflectional Symmetry",
"Laplacian",
"Eigenanalysis"
],
"authors": [
{
"givenName": "Yi-Ling",
"surname": "Qiao",
"fullName": "Yi-Ling Qiao",
"affiliation": "Advanced Computer Research Center, Institute of Computing Technology, Chinese Academy of Sciences, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Advanced Computer Research Center, Institute of Computing Technology Chinese Academy of Sciences, 53035 Beijing, Beijing, China, 100190",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shu-Zhi",
"surname": "Liu",
"fullName": "Shu-Zhi Liu",
"affiliation": "Computer Science and Technology, University of Chinese Academy of Sciences, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ligang",
"surname": "Liu",
"fullName": "Ligang Liu",
"affiliation": "Department of Mathematics, Zhejiang University, Hangzhou, Zhejiang, China, 310027",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Kun",
"surname": "Lai",
"fullName": "Yu-Kun Lai",
"affiliation": "School of Computer Science and Informatics, Cardiff University, 2112 Cardiff, South Glamorgan, United Kingdom of Great Britain and Northern Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xilin",
"surname": "Chen",
"fullName": "Xilin Chen",
"affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, Beijing, Beijing, China, 100080",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2016/7258/0/07552918",
"title": "BCA: Bi-symmetric component analysis for temporal symmetry in human actions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552918/12OmNrkjVgn",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981743",
"title": "Laplace-Beltrami eigenfunction metrics and geodesic shape distance features for shape matching in synthetic aperture sonar",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981743/12OmNwkhTgo",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d624",
"title": "Geodesic Distance Descriptors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d624/12OmNx38vRo",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587538",
"title": "Articulated shape matching using Laplacian eigenfunctions and unsupervised point registration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587538/12OmNyQph7m",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223196",
"title": "A measure of symmetry based on shape similarity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223196/12OmNzayN95",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a499",
"title": "SpectroMeter: Amortized Sublinear Spectral Approximation of Distance on Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a499/12OmNzdoMNK",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1999/05/i0466",
"title": "Symmetry Detection by Generalized Complex (GC) Moments: A Close-Form Solution",
"doi": null,
"abstractUrl": "/journal/tp/1999/05/i0466/13rRUxbTMA3",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/04/09808406",
"title": "Learning to Detect 3D Symmetry From Single-View RGB-D Images With Weak Supervision",
"doi": null,
"abstractUrl": "/journal/tp/2023/04/09808406/1EzDOrzPeeI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956227",
"title": "Projection of semi-shapes for rotational symmetry detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956227/1IHqvNVzR4s",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/08/09007740",
"title": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera",
"doi": null,
"abstractUrl": "/journal/tp/2021/08/09007740/1hGqrsQbjPO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09770381",
"articleId": "1D9G4inD4oE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09772329",
"articleId": "1DgjDn5nymI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D6qPjvIP16",
"doi": "10.1109/TVCG.2022.3171074",
"abstract": "In domains such as agronomy or manufacturing, experts need to consider trade-offs when making decisions that involve several, often competing, objectives. Such analysis is complex and may be conducted over long periods of time, making it hard to revisit. In this paper, we consider the use of analytic provenance mechanisms to aid experts recall and keep track of trade-off analysis. We implemented VisProm, a web-based trade-off analysis system, that incorporates in-visualization provenance views, designed to help experts keep track of trade-offs and their objectives. We used VisProm as a technology probe to understand user needs and explore the potential role of provenance in this context. Through observation sessions with three groups of experts analyzing their own data, we make the following contributions. We first, identify eight high-level tasks that experts engaged in during trade-off analysis, such as locating and characterizing interest zones in the trade-off space, and show how these tasks can be supported by provenance visualization. Second, we refine findings from previous work on provenance purposes such as recall and reproduce, by identifying specific objects of these purposes related to trade-off analysis, such as interest zones, and exploration structure (e.g., exploration of alternatives and branches). Third, we discuss insights on how the identified provenance objects and our designs support these trade-off analysis tasks, both when revisiting past analysis and while actively exploring. And finally, we identify new opportunities for provenance-driven trade-off analysis, for example related to monitoring the coverage of the trade-off space, and tracking alternative trade-off scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In domains such as agronomy or manufacturing, experts need to consider trade-offs when making decisions that involve several, often competing, objectives. Such analysis is complex and may be conducted over long periods of time, making it hard to revisit. In this paper, we consider the use of analytic provenance mechanisms to aid experts recall and keep track of trade-off analysis. We implemented VisProm, a web-based trade-off analysis system, that incorporates in-visualization provenance views, designed to help experts keep track of trade-offs and their objectives. We used VisProm as a technology probe to understand user needs and explore the potential role of provenance in this context. Through observation sessions with three groups of experts analyzing their own data, we make the following contributions. We first, identify eight high-level tasks that experts engaged in during trade-off analysis, such as locating and characterizing interest zones in the trade-off space, and show how these tasks can be supported by provenance visualization. Second, we refine findings from previous work on provenance purposes such as recall and reproduce, by identifying specific objects of these purposes related to trade-off analysis, such as interest zones, and exploration structure (e.g., exploration of alternatives and branches). Third, we discuss insights on how the identified provenance objects and our designs support these trade-off analysis tasks, both when revisiting past analysis and while actively exploring. And finally, we identify new opportunities for provenance-driven trade-off analysis, for example related to monitoring the coverage of the trade-off space, and tracking alternative trade-off scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In domains such as agronomy or manufacturing, experts need to consider trade-offs when making decisions that involve several, often competing, objectives. Such analysis is complex and may be conducted over long periods of time, making it hard to revisit. In this paper, we consider the use of analytic provenance mechanisms to aid experts recall and keep track of trade-off analysis. We implemented VisProm, a web-based trade-off analysis system, that incorporates in-visualization provenance views, designed to help experts keep track of trade-offs and their objectives. We used VisProm as a technology probe to understand user needs and explore the potential role of provenance in this context. Through observation sessions with three groups of experts analyzing their own data, we make the following contributions. We first, identify eight high-level tasks that experts engaged in during trade-off analysis, such as locating and characterizing interest zones in the trade-off space, and show how these tasks can be supported by provenance visualization. Second, we refine findings from previous work on provenance purposes such as recall and reproduce, by identifying specific objects of these purposes related to trade-off analysis, such as interest zones, and exploration structure (e.g., exploration of alternatives and branches). Third, we discuss insights on how the identified provenance objects and our designs support these trade-off analysis tasks, both when revisiting past analysis and while actively exploring. And finally, we identify new opportunities for provenance-driven trade-off analysis, for example related to monitoring the coverage of the trade-off space, and tracking alternative trade-off scenarios.",
"title": "Understanding How In-Visualization Provenance Can Support Trade-off Analysis",
"normalizedTitle": "Understanding How In-Visualization Provenance Can Support Trade-off Analysis",
"fno": "09768153",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Data Visualization",
"History",
"Decision Making",
"Probes",
"Optimization",
"Object Recognition",
"Provenance",
"Visualization",
"Trade Offs",
"Multi Criteria",
"Decision Making",
"Qualitative Study"
],
"authors": [
{
"givenName": "Mehdi Rafik",
"surname": "Chakhchoukh",
"fullName": "Mehdi Rafik Chakhchoukh",
            "affiliation": "LISN, Université Paris-Saclay, 27048 Gif-sur-Yvette, Ile-de-France, France, 91190",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nadia",
"surname": "Boukhelifa",
"fullName": "Nadia Boukhelifa",
            "affiliation": "CEPIA, INRA, 27057 Paris, Ile-de-France, France, 75338",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anastasia",
"surname": "Bezerianos",
"fullName": "Anastasia Bezerianos",
            "affiliation": "LRI-Laboratoire de Recherche en Informatique, Université Paris-Sud, Paris, Orsay, France, 91405",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2017/4662/0/08388644",
"title": "A standard decision format using provenance",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388644/12OmNvFHfHY",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2003/762/0/01257852",
"title": "A trade-off Oriented placement tool",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2003/01257852/12OmNwEJ0SM",
"parentPublication": {
"id": "proceedings/iccad/2003/762/0",
"title": "ICCAD-2003. International Conference on Computer Aided Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2008/3075/0/04439098",
"title": "Measuring Data Believability: A Provenance Approach",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2008/04439098/12OmNxcMSgM",
"parentPublication": {
"id": "proceedings/hicss/2008/3075/0",
"title": "Proceedings of the 41st Annual Hawaii International Conference on System Sciences (HICSS 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/igcc/2012/2155/0/IEEECol",
"title": "Optimal energy trade-off schedules",
"doi": null,
"abstractUrl": "/proceedings-article/igcc/2012/IEEECol/12OmNyrIaKN",
"parentPublication": {
"id": "proceedings/igcc/2012/2155/0",
"title": "2012 International Green Computing Conference (IGCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kbse/1996/7680/0/76800144",
"title": "Software Synthesis for Trade-off Design",
"doi": null,
"abstractUrl": "/proceedings-article/kbse/1996/76800144/12OmNz6iOkc",
"parentPublication": {
"id": "proceedings/kbse/1996/7680/0",
"title": "Knowledge-Based Software Engineering Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2017/04/07327204",
"title": "Self-Adaptive Trade-off Decision Making for Autoscaling Cloud-Based Services",
"doi": null,
"abstractUrl": "/journal/sc/2017/04/07327204/13rRUxDqS5M",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seams/2018/5715/0/571501a012",
"title": "DeSiRE: Further Understanding Nuances of Degrees of Satisfaction of Non-functional Requirements Trade-Off",
"doi": null,
"abstractUrl": "/proceedings-article/seams/2018/571501a012/17D45WnnFXV",
"parentPublication": {
"id": "proceedings/seams/2018/5715/0",
"title": "2018 IEEE/ACM 13th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903572",
"title": "The Influence of Visual Provenance Representations on Strategies in a Collaborative Hand-off Data Analysis Scenario",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903572/1GZonS2SkKs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08788592",
"title": "Analytic Provenance in Practice: The Role of Provenance in Real-World Visualization and Data Analysis Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08788592/1cfqCMPtgRy",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400c713",
"title": "PITA: Privacy Through Provenance Abstraction",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400c713/1uGXoWOC8zS",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09767765",
"articleId": "1D4MJudYK3u",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09769931",
"articleId": "1D8asOXVetq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1D8arLUWsoM",
"name": "ttg555501-09768153s1-tvcg-3171074-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09768153s1-tvcg-3171074-mm.zip",
"extension": "zip",
"size": "115 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D4MJ8H1fk4",
"doi": "10.1109/TVCG.2022.3172107",
"abstract": "With the fast-growing number of classification models being produced every day, numerous model interpretation and comparison solutions have also been introduced. For example, LIME and SHAP can interpret what input features contribute more to a classifier's output predictions. Different numerical metrics (e.g., accuracy) can be used to easily compare two classifiers. However, few works can interpret the contribution of a data feature to a classifier in comparison with its contribution to another classifier. This comparative interpretation can help to disclose the fundamental difference between two classifiers, select classifiers in different feature conditions, and better ensemble two classifiers. To accomplish it, we propose a learning-from-disagreement (LFD) framework to visually compare two classification models. Specifically, LFD identifies data instances with disagreed predictions from two compared classifiers and trains a discriminator to learn from the disagreed instances. As the two classifiers' training features may not be available, we train the discriminator through a set of meta-features proposed based on certain hypotheses of the classifiers to probe their behaviors. Interpreting the trained discriminator with the SHAP values of different meta-features, we provide actionable insights into the compared classifiers. Also, we introduce multiple metrics to profile the importance of meta-features from different perspectives. With these metrics, one can easily identify meta-features with the most complementary behaviors in two classifiers, and use them to better ensemble the classifiers. We focus on binary classification models in the financial services and advertising industry to demonstrate the efficacy of our proposed framework and visualizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the fast-growing number of classification models being produced every day, numerous model interpretation and comparison solutions have also been introduced. For example, LIME and SHAP can interpret what input features contribute more to a classifier's output predictions. Different numerical metrics (e.g., accuracy) can be used to easily compare two classifiers. However, few works can interpret the contribution of a data feature to a classifier in comparison with its contribution to another classifier. This comparative interpretation can help to disclose the fundamental difference between two classifiers, select classifiers in different feature conditions, and better ensemble two classifiers. To accomplish it, we propose a learning-from-disagreement (LFD) framework to visually compare two classification models. Specifically, LFD identifies data instances with disagreed predictions from two compared classifiers and trains a discriminator to learn from the disagreed instances. As the two classifiers' training features may not be available, we train the discriminator through a set of meta-features proposed based on certain hypotheses of the classifiers to probe their behaviors. Interpreting the trained discriminator with the SHAP values of different meta-features, we provide actionable insights into the compared classifiers. Also, we introduce multiple metrics to profile the importance of meta-features from different perspectives. With these metrics, one can easily identify meta-features with the most complementary behaviors in two classifiers, and use them to better ensemble the classifiers. We focus on binary classification models in the financial services and advertising industry to demonstrate the efficacy of our proposed framework and visualizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the fast-growing number of classification models being produced every day, numerous model interpretation and comparison solutions have also been introduced. For example, LIME and SHAP can interpret what input features contribute more to a classifier's output predictions. Different numerical metrics (e.g., accuracy) can be used to easily compare two classifiers. However, few works can interpret the contribution of a data feature to a classifier in comparison with its contribution to another classifier. This comparative interpretation can help to disclose the fundamental difference between two classifiers, select classifiers in different feature conditions, and better ensemble two classifiers. To accomplish it, we propose a learning-from-disagreement (LFD) framework to visually compare two classification models. Specifically, LFD identifies data instances with disagreed predictions from two compared classifiers and trains a discriminator to learn from the disagreed instances. As the two classifiers' training features may not be available, we train the discriminator through a set of meta-features proposed based on certain hypotheses of the classifiers to probe their behaviors. Interpreting the trained discriminator with the SHAP values of different meta-features, we provide actionable insights into the compared classifiers. Also, we introduce multiple metrics to profile the importance of meta-features from different perspectives. With these metrics, one can easily identify meta-features with the most complementary behaviors in two classifiers, and use them to better ensemble the classifiers. We focus on binary classification models in the financial services and advertising industry to demonstrate the efficacy of our proposed framework and visualizations.",
"title": "Learning-From-Disagreement: A Model Comparison and Visual Analytics Framework",
"normalizedTitle": "Learning-From-Disagreement: A Model Comparison and Visual Analytics Framework",
"fno": "09767606",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement",
"Training",
"Predictive Models",
"Data Models",
"Analytical Models",
"Data Visualization",
"Computational Modeling",
"Learning From Disagreement",
"Model Comparison",
"Feature Visualization",
"Visual Analytics",
"Explainable AI"
],
"authors": [
{
"givenName": "Junpeng",
"surname": "Wang",
"fullName": "Junpeng Wang",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liang",
"surname": "Wang",
"fullName": "Liang Wang",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yan",
"surname": "Zheng",
"fullName": "Yan Zheng",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chin-Chia Michael",
"surname": "Yeh",
"fullName": "Chin-Chia Michael Yeh",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shubham",
"surname": "Jain",
"fullName": "Shubham Jain",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Zhang",
"fullName": "Wei Zhang",
"affiliation": "Data Analytics Team, Visa Research, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iacc/2017/1560/0/07976772",
"title": "Framework for Predictive Analytics as a Service Using Ensemble Model",
"doi": null,
"abstractUrl": "/proceedings-article/iacc/2017/07976772/12OmNrJROZy",
"parentPublication": {
"id": "proceedings/iacc/2017/1560/0",
"title": "2017 IEEE 7th International Advance Computing Conference (IACC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscloud/2015/9300/0/9300a272",
"title": "Meta Meta-Analytics for Risk Forecast Using Big Data Meta-Regression in Financial Industry",
"doi": null,
"abstractUrl": "/proceedings-article/cscloud/2015/9300a272/12OmNwHQB0s",
"parentPublication": {
"id": "proceedings/cscloud/2015/9300/0",
"title": "2015 IEEE 2nd International Conference on Cyber Security and Cloud Computing (CSCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875967",
"title": "Knowledge Generation Model for Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875967/13rRUILLkvt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a210",
"title": "Industry-Driven Visual Analytics for Understanding Financial Timeseries Models",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a210/1cMF9CwA2xq",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08812988",
"title": "Explaining Vulnerabilities to Adversarial Machine Learning through Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08812988/1cOhCfAgaZO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09219240",
"title": "A Visual Analytics Framework for Explaining and Diagnosing Transfer Learning Processes",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09219240/1nMMmribStW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222341",
"title": "Visual Analytics for Temporal Hypergraph Model Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222341/1nTqGZHBYM8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2020/8009/0/800900a012",
"title": "Diagnosing Concept Drift with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2020/800900a012/1q7jvQC41gs",
"parentPublication": {
"id": "proceedings/vast/2020/8009/0",
"title": "2020 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09408377",
"title": "Inspecting the Running Process of Horizontal Federated Learning via Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09408377/1sVEP6R17eU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a186",
"title": "Investigating the Evolution of Tree Boosting Models with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a186/1tTtslm0K4g",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09767766",
"articleId": "1D4MIYJDHpK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767765",
"articleId": "1D4MJudYK3u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1D6qQIMwwsU",
"name": "ttg555501-09767606s1-supp2-3172107.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09767606s1-supp2-3172107.mp4",
"extension": "mp4",
"size": "63.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1D6qRriZyqk",
"name": "ttg555501-09767606s1-supp1-3172107.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09767606s1-supp1-3172107.pdf",
"extension": "pdf",
"size": "2.2 MB",
"__typename": "WebExtraType"
},
{
"id": "1D6qRk7Uwta",
"name": "ttg555501-09767606s1-supp3-3172107.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09767606s1-supp3-3172107.mp4",
"extension": "mp4",
"size": "35.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D4MIOrP28M",
"doi": "10.1109/TVCG.2022.3170853",
"abstract": "The freeform architectural modeling process often involves two important stages: concept design and digital modeling. In the first stage, architects usually sketch the overall 3D shape and the panel layout on a physical or digital paper briefly. In the second stage, a digital 3D model is created using the sketch as a reference. The digital model needs to incorporate geometric requirements for its components, such as the planarity of panels due to consideration of construction costs, which can make the modeling process more challenging. In this work, we present a novel sketch-based system to bridge the concept design and digital modeling of freeform roof-like shapes represented as planar quadrilateral (PQ) meshes. Our system allows the user to sketch the surface boundary and contour lines under axonometric projection and supports the sketching of occluded regions. In addition, the user can sketch feature lines to provide directional guidance to the PQ mesh layout. Given the 2D sketch input, we propose a deep neural network to infer in real-time the underlying surface shape along with a dense conjugate direction field, both of which are used to extract the final PQ mesh. To train and validate our network, we generate a large synthetic dataset that mimics architect sketching of freeform quadrilateral patches. The effectiveness and usability of our system are demonstrated with quantitative and qualitative evaluation as well as user studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The freeform architectural modeling process often involves two important stages: concept design and digital modeling. In the first stage, architects usually sketch the overall 3D shape and the panel layout on a physical or digital paper briefly. In the second stage, a digital 3D model is created using the sketch as a reference. The digital model needs to incorporate geometric requirements for its components, such as the planarity of panels due to consideration of construction costs, which can make the modeling process more challenging. In this work, we present a novel sketch-based system to bridge the concept design and digital modeling of freeform roof-like shapes represented as planar quadrilateral (PQ) meshes. Our system allows the user to sketch the surface boundary and contour lines under axonometric projection and supports the sketching of occluded regions. In addition, the user can sketch feature lines to provide directional guidance to the PQ mesh layout. Given the 2D sketch input, we propose a deep neural network to infer in real-time the underlying surface shape along with a dense conjugate direction field, both of which are used to extract the final PQ mesh. To train and validate our network, we generate a large synthetic dataset that mimics architect sketching of freeform quadrilateral patches. The effectiveness and usability of our system are demonstrated with quantitative and qualitative evaluation as well as user studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The freeform architectural modeling process often involves two important stages: concept design and digital modeling. In the first stage, architects usually sketch the overall 3D shape and the panel layout on a physical or digital paper briefly. In the second stage, a digital 3D model is created using the sketch as a reference. The digital model needs to incorporate geometric requirements for its components, such as the planarity of panels due to consideration of construction costs, which can make the modeling process more challenging. In this work, we present a novel sketch-based system to bridge the concept design and digital modeling of freeform roof-like shapes represented as planar quadrilateral (PQ) meshes. Our system allows the user to sketch the surface boundary and contour lines under axonometric projection and supports the sketching of occluded regions. In addition, the user can sketch feature lines to provide directional guidance to the PQ mesh layout. Given the 2D sketch input, we propose a deep neural network to infer in real-time the underlying surface shape along with a dense conjugate direction field, both of which are used to extract the final PQ mesh. To train and validate our network, we generate a large synthetic dataset that mimics architect sketching of freeform quadrilateral patches. The effectiveness and usability of our system are demonstrated with quantitative and qualitative evaluation as well as user studies.",
"title": "Sketch2PQ: Freeform Planar Quadrilateral Mesh Design via a Single Sketch",
"normalizedTitle": "Sketch2PQ: Freeform Planar Quadrilateral Mesh Design via a Single Sketch",
"fno": "09767703",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Geometry",
"Layout",
"Splines Mathematics",
"Semiconductor Device Modeling",
"Freeform Surface",
"Architectural Geometry",
"Planar Quadrilateral Mesh",
"Sketch Based Modeling",
"Deep Learning"
],
"authors": [
{
"givenName": "Zhi",
"surname": "Deng",
"fullName": "Zhi Deng",
"affiliation": "School of Data Science, University of Science and Technology of China, 12652 Hefei, Anhui, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yang",
"surname": "Liu",
"fullName": "Yang Liu",
"affiliation": "Internet Graphics Group, Microsoft Research Asia, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao",
"surname": "Pan",
"fullName": "Hao Pan",
"affiliation": "Internet Graphics Group, Microsoft Research Asia, 216064 Beijing, Beijing, China, 100080",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wassim",
"surname": "Jabi",
"fullName": "Wassim Jabi",
"affiliation": "Welsh School of Architecture, Cardiff University, 2112 Cardiff, Cardiff, United Kingdom of Great Britain and Northern Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Juyong",
"surname": "Zhang",
"fullName": "Juyong Zhang",
"affiliation": "Mathematical Sciences, University of Science and Technology of China, Hefei, Anhui, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bailin",
"surname": "Deng",
"fullName": "Bailin Deng",
"affiliation": "School of Computer Science and Informatics, Cardiff University, 2112 Cardiff, Wales, United Kingdom of Great Britain and Northern Ireland, CF24 3AA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2013/2246/0/2246a252",
"title": "An Extension Algorithm for Ball B-Spline Curves with G2 Continuity",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a252/12OmNC8MsKH",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2011/4548/0/4548a078",
"title": "Sketch-Based Adaptive Mesh Augmentation Using Stellar Operators",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2011/4548a078/12OmNvT2p7O",
"parentPublication": {
"id": "proceedings/sibgrapi/2011/4548/0",
"title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a098",
"title": "3D Maquetter: Sketch-Based 3D Content Modeling for Digital Earth",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a098/12OmNvkGW3B",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2010/4215/0/4215a105",
"title": "Reference Plane Assisted Sketching Interface for 3D Freeform Shape Design",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a105/12OmNz2C1zv",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815018",
"title": "G2-Continuity Blending of Ball B-Spline Curve Using Extension",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815018/12OmNzX6coq",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/06/mcg2015060051",
"title": "Angle-Preserving Quadrilateral Mesh Parameterization",
"doi": null,
"abstractUrl": "/magazine/cg/2015/06/mcg2015060051/13rRUxBa5pf",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a022",
"title": "Garment Ideation: Iterative View-Aware Sketch-Based Garment Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a022/1KYsti3axvq",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvlsi/2019/3391/0/339100a574",
"title": "PVTMC: An All-Digital Sub-Picosecond Timing Measurement Circuit Based on Process Variations",
"doi": null,
"abstractUrl": "/proceedings-article/isvlsi/2019/339100a574/1dUnISEfIDm",
"parentPublication": {
"id": "proceedings/isvlsi/2019/3391/0",
"title": "2019 IEEE Computer Society Annual Symposium on VLSI (ISVLSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2020/8432/0/843200a154",
"title": "Sketch-Based Interaction for Planning-Based Interactive Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2020/843200a154/1pQIKYRlYTm",
"parentPublication": {
"id": "proceedings/sbgames/2020/8432/0",
"title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/03/09392277",
"title": "Skeleton-Based Parametric 2-D Region Representation: Disk B-Spline Curves",
"doi": null,
"abstractUrl": "/magazine/cg/2021/03/09392277/1sq7H56VfiM",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09767783",
"articleId": "1D4MIotOemQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767766",
"articleId": "1D4MIYJDHpK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1D6qPLXROq4",
"name": "ttg555501-09767703s1-supp2-3170853.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09767703s1-supp2-3170853.mp4",
"extension": "mp4",
"size": "61.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1D6qPZopiVi",
"name": "ttg555501-09767703s1-supp1-3170853.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09767703s1-supp1-3170853.pdf",
"extension": "pdf",
"size": "6.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D4MJudYK3u",
"doi": "10.1109/TVCG.2022.3171839",
"abstract": "With the goal of making contents easy to understand, memorize and share, a clear and easy-to-follow layout is important for visual notes. Unfortunately, since visual notes are often taken by the designers in real time while watching a video or listening to a presentation, the contents are usually not carefully structured, resulting in layouts that may be difficult for others to follow. In this paper, we address this problem by proposing a novel approach to automatically optimize the layouts of visual notes. Our approach predicts the design order of a visual note and then warps the contents along the predicted design order such that the visual note can be easier to follow and understand. At the core of our approach is a learning-based framework to reason about the element-wise design orders of visual notes. In particular, we first propose a hierarchical LSTM-based architecture to predict a grid-based design order of the visual note, based on the graphical and textual information. We then derive the element-wise order from the grid-based prediction. Such an idea allows our network to be weakly-supervised, i.e., making it possible to predict dense grid-based orders from visual notes with only coarse annotations. We evaluate the effectiveness of our approach on visual notes with diverse content densities and layouts. The results show that our network can predict plausible design orders for various types of visual notes and our approach can effectively optimize their layouts in order for them to be easier to follow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the goal of making contents easy to understand, memorize and share, a clear and easy-to-follow layout is important for visual notes. Unfortunately, since visual notes are often taken by the designers in real time while watching a video or listening to a presentation, the contents are usually not carefully structured, resulting in layouts that may be difficult for others to follow. In this paper, we address this problem by proposing a novel approach to automatically optimize the layouts of visual notes. Our approach predicts the design order of a visual note and then warps the contents along the predicted design order such that the visual note can be easier to follow and understand. At the core of our approach is a learning-based framework to reason about the element-wise design orders of visual notes. In particular, we first propose a hierarchical LSTM-based architecture to predict a grid-based design order of the visual note, based on the graphical and textual information. We then derive the element-wise order from the grid-based prediction. Such an idea allows our network to be weakly-supervised, i.e., making it possible to predict dense grid-based orders from visual notes with only coarse annotations. We evaluate the effectiveness of our approach on visual notes with diverse content densities and layouts. The results show that our network can predict plausible design orders for various types of visual notes and our approach can effectively optimize their layouts in order for them to be easier to follow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the goal of making contents easy to understand, memorize and share, a clear and easy-to-follow layout is important for visual notes. Unfortunately, since visual notes are often taken by the designers in real time while watching a video or listening to a presentation, the contents are usually not carefully structured, resulting in layouts that may be difficult for others to follow. In this paper, we address this problem by proposing a novel approach to automatically optimize the layouts of visual notes. Our approach predicts the design order of a visual note and then warps the contents along the predicted design order such that the visual note can be easier to follow and understand. At the core of our approach is a learning-based framework to reason about the element-wise design orders of visual notes. In particular, we first propose a hierarchical LSTM-based architecture to predict a grid-based design order of the visual note, based on the graphical and textual information. We then derive the element-wise order from the grid-based prediction. Such an idea allows our network to be weakly-supervised, i.e., making it possible to predict dense grid-based orders from visual notes with only coarse annotations. We evaluate the effectiveness of our approach on visual notes with diverse content densities and layouts. The results show that our network can predict plausible design orders for various types of visual notes and our approach can effectively optimize their layouts in order for them to be easier to follow.",
"title": "Design Order Guided Visual Note Optimization",
"normalizedTitle": "Design Order Guided Visual Note Optimization",
"fno": "09767765",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Visualization",
"Predictive Models",
"Hidden Markov Models",
"Optimization",
"Graphics",
"Annotations",
"Visual Note",
"Design Order",
"Layout Optimization"
],
"authors": [
{
"givenName": "Xiaotian",
"surname": "Qiao",
"fullName": "Xiaotian Qiao",
"affiliation": "Computer Science and Technology, Xidian University, 47905 Xian, Shaanxi, China, 710071",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "Cao",
"fullName": "Ying Cao",
"affiliation": "Computer Science, City University of Hong Kong, Kowloon Tong, Kowloon, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rynson W.H.",
"surname": "Lau",
"fullName": "Rynson W.H. Lau",
"affiliation": "Department of Computer Science, City University of Hong Kong, Hong Kong, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2013/5261/0/06684970",
"title": "Shared note-taking using electronic enhanced guided notes: Peer-review activity, performance, and self-regulated learning skills",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2013/06684970/12OmNC2xhI6",
"parentPublication": {
"id": "proceedings/fie/2013/5261/0",
"title": "2013 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2007/2822/2/28220919",
"title": "Example-Based Logical Labeling of Document Title Page Images",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2007/28220919/12OmNrAMEQk",
"parentPublication": {
"id": "proceedings/icdar/2007/2822/2",
"title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/2/05745027",
"title": "A coupled HMM for audio-visual speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745027/12OmNx8wTmZ",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/2",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400494",
"title": "Visual cluster exploration of web clickstream data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400494/12OmNxHJ9t7",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a963",
"title": "D-Note: Computer-Aided Digital Note Taking System on Physical Book",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a963/12OmNy5hRiG",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/1/05743951",
"title": "On the use of high order derivatives for high performance alphabet recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05743951/12OmNy7QfkR",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/1",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2012/1353/0/06462452",
"title": "Work in progress: Implementation of enhanced guided notes and collaborative note-taking in learning electric circuit concepts",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2012/06462452/12OmNzVGcPz",
"parentPublication": {
"id": "proceedings/fie/2012/1353/0",
"title": "2012 Frontiers in Education Conference Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a767",
"title": "Identifying Ragas in Indian Music",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a767/12OmNzlUKqT",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2019/0858/0/09006403",
"title": "Patient-level Classification on Clinical Note Sequences Guided by Attributed Hierarchical Attention",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2019/09006403/1hJsy8LUegU",
"parentPublication": {
"id": "proceedings/big-data/2019/0858/0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a263",
"title": "An Embedding-based Medical Note De-identification Approach with Minimal Annotation",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a263/1pBMxV2vToc",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09767703",
"articleId": "1D4MIOrP28M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767766",
"articleId": "1D4MIYJDHpK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D4MIYJDHpK",
"doi": "10.1109/TVCG.2022.3171794",
"abstract": "Sampling, grouping, and aggregation are three important components in the multi-scale analysis of point clouds. In this paper, we present a novel data-driven sampler learning strategy for point-wise analysis tasks. Unlike the widely used sampling technique, Farthest Point Sampling (FPS), we propose to learn sampling and downstream applications jointly. Our key insight is that uniform sampling methods like FPS are not always optimal for different tasks: sampling more points around boundary areas can make the point-wise classification easier for segmentation. Towards this end, we propose a novel sampler learning strategy that learns sampling point displacement supervised by task-related ground truth information and can be trained jointly with the underlying tasks. We further demonstrate our methods in various point-wise analysis tasks, including semantic part segmentation, point cloud completion, and keypoint detection. Our experiments show that jointly learning of the sampler and task brings better performance than using FPS in various point-based networks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sampling, grouping, and aggregation are three important components in the multi-scale analysis of point clouds. In this paper, we present a novel data-driven sampler learning strategy for point-wise analysis tasks. Unlike the widely used sampling technique, Farthest Point Sampling (FPS), we propose to learn sampling and downstream applications jointly. Our key insight is that uniform sampling methods like FPS are not always optimal for different tasks: sampling more points around boundary areas can make the point-wise classification easier for segmentation. Towards this end, we propose a novel sampler learning strategy that learns sampling point displacement supervised by task-related ground truth information and can be trained jointly with the underlying tasks. We further demonstrate our methods in various point-wise analysis tasks, including semantic part segmentation, point cloud completion, and keypoint detection. Our experiments show that jointly learning of the sampler and task brings better performance than using FPS in various point-based networks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sampling, grouping, and aggregation are three important components in the multi-scale analysis of point clouds. In this paper, we present a novel data-driven sampler learning strategy for point-wise analysis tasks. Unlike the widely used sampling technique, Farthest Point Sampling (FPS), we propose to learn sampling and downstream applications jointly. Our key insight is that uniform sampling methods like FPS are not always optimal for different tasks: sampling more points around boundary areas can make the point-wise classification easier for segmentation. Towards this end, we propose a novel sampler learning strategy that learns sampling point displacement supervised by task-related ground truth information and can be trained jointly with the underlying tasks. We further demonstrate our methods in various point-wise analysis tasks, including semantic part segmentation, point cloud completion, and keypoint detection. Our experiments show that jointly learning of the sampler and task brings better performance than using FPS in various point-based networks.",
"title": "Task-Aware Sampling Layer for Point-Wise Analysis",
"normalizedTitle": "Task-Aware Sampling Layer for Point-Wise Analysis",
"fno": "09767766",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Point Cloud Compression",
"Three Dimensional Displays",
"Semantics",
"Convolution",
"Computer Architecture",
"Training",
"3 D Vision",
"Point Cloud",
"Point Sampling",
"Point Cloud Segmentation"
],
"authors": [
{
"givenName": "Yiqun",
"surname": "Lin",
"fullName": "Yiqun Lin",
"affiliation": "School of Science and Engineering, The Chinese University of Hong Kong - Shenzhen, 407605 Shenzhen, Guangdong, China, 518172",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lichang",
"surname": "Chen",
"fullName": "Lichang Chen",
"affiliation": "Electrical and Computer Engineering, University of Pittsburgh, 6614 Pittsburgh, Pennsylvania, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haibin",
"surname": "Huang",
"fullName": "Haibin Huang",
"affiliation": "N/A, Kuaishou Technology, Palo Alto, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chongyang",
"surname": "Ma",
"fullName": "Chongyang Ma",
"affiliation": "YLab, Kwai Inc., San Francisco, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoguang",
"surname": "Han",
"fullName": "Xiaoguang Han",
"affiliation": "School of Science and Engineering, The Chinese University of Hong Kong - Shenzhen, 407605 Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuguang",
"surname": "Cui",
"fullName": "Shuguang Cui",
"affiliation": "Shenzhen Research Institute of Big Data and Future Network of Intelligence Institute (FNii), The Chinese University of Hong Kong - Shenzhen, 407605 Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200c723",
"title": "Improving 3D Object Detection with Channel-wise Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c723/1BmFZSLSRRm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g108",
"title": "Sampling Network Guided Cross-Entropy Method for Unsupervised Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g108/1BmGp36SSvm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h315",
"title": "Unsupervised Point Cloud Object Co-segmentation by Co-contrastive Learning and Mutual Attention Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h315/1BmLfkQYoz6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900e418",
"title": "PointMotionNet: Point-Wise Motion Learning for Large-Scale LiDAR Point Clouds Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900e418/1G563yaYq1q",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icipmc/2022/6872/0/687200a046",
"title": "SmoothNet: Smooth Point Cloud Up-sampling",
"doi": null,
"abstractUrl": "/proceedings-article/icipmc/2022/687200a046/1GIunwuTaow",
"parentPublication": {
"id": "proceedings/icipmc/2022/6872/0",
"title": "2022 International Conference on Image Processing and Media Computing (ICIPMC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g328",
"title": "IDEA-Net: Dynamic 3D Point Cloud Interpolation via Deep Embedding Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g328/1H1hTQlrJkY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g771",
"title": "GPV-Pose: Category-level Object Pose Estimation via Geometry-guided Point-wise Voting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g771/1H1hx09yd2g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h575",
"title": "SampleNet: Differentiable Point Cloud Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h575/1m3nlNK5m5q",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f588",
"title": "PointASNL: Robust Point Clouds Processing Using Nonlocal Neural Networks With Adaptive Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f588/1m3nznsZceQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09627565",
"title": "PRIN/SPRIN: On Extracting Point-Wise Rotation Invariant Features",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09627565/1yORKbr1kCQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09767703",
"articleId": "1D4MIOrP28M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767606",
"articleId": "1D4MJ8H1fk4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D4MIotOemQ",
"doi": "10.1109/TVCG.2022.3171179",
"abstract": "We present a practical framework to port Bzier curves to surfaces. We support the interactive drawing and editing of Bzier splines on manifold meshes with millions of triangles, by relying on just repeated manifold averages. We show that direct extensions of the de Casteljau and Bernstein evaluation algorithms to the manifold setting are fragile, and prone to discontinuities when control polygons become large. Conversely, approaches based on subdivision are robust and can be implemented efficiently. We implement manifold extensions of the recursive de Casteljau bisection, and an open-uniform Lane-Riesenfeld subdivision scheme. For both schemes, we present algorithms for curve tracing, point evaluation, and approximated point insertion. We run bulk experiments to test our algorithms for robustness and performance, and we compare them with other methods at the state of the art, always achieving correct results and superior performance. For interactive editing, we port all the basic user interface interactions found in 2D tools directly to the mesh. We also support mapping complex SVG drawings to the mesh and their interactive editing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a practical framework to port Bzier curves to surfaces. We support the interactive drawing and editing of Bzier splines on manifold meshes with millions of triangles, by relying on just repeated manifold averages. We show that direct extensions of the de Casteljau and Bernstein evaluation algorithms to the manifold setting are fragile, and prone to discontinuities when control polygons become large. Conversely, approaches based on subdivision are robust and can be implemented efficiently. We implement manifold extensions of the recursive de Casteljau bisection, and an open-uniform Lane-Riesenfeld subdivision scheme. For both schemes, we present algorithms for curve tracing, point evaluation, and approximated point insertion. We run bulk experiments to test our algorithms for robustness and performance, and we compare them with other methods at the state of the art, always achieving correct results and superior performance. For interactive editing, we port all the basic user interface interactions found in 2D tools directly to the mesh. We also support mapping complex SVG drawings to the mesh and their interactive editing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a practical framework to port Bzier curves to surfaces. We support the interactive drawing and editing of Bzier splines on manifold meshes with millions of triangles, by relying on just repeated manifold averages. We show that direct extensions of the de Casteljau and Bernstein evaluation algorithms to the manifold setting are fragile, and prone to discontinuities when control polygons become large. Conversely, approaches based on subdivision are robust and can be implemented efficiently. We implement manifold extensions of the recursive de Casteljau bisection, and an open-uniform Lane-Riesenfeld subdivision scheme. For both schemes, we present algorithms for curve tracing, point evaluation, and approximated point insertion. We run bulk experiments to test our algorithms for robustness and performance, and we compare them with other methods at the state of the art, always achieving correct results and superior performance. For interactive editing, we port all the basic user interface interactions found in 2D tools directly to the mesh. We also support mapping complex SVG drawings to the mesh and their interactive editing.",
"title": "b/Surf: Interactive Bézier Splines on Surface Meshes",
"normalizedTitle": "b/Surf: Interactive Bézier Splines on Surface Meshes",
"fno": "09767783",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Manifolds",
"Splines Mathematics",
"Measurement",
"Approximation Algorithms",
"Visualization",
"Robustness",
"Geometry",
"Geometric Meshes",
"Spline Curves",
"User Interfaces",
"Geometry Processing"
],
"authors": [
{
"givenName": "Claudio",
"surname": "Mancinelli",
"fullName": "Claudio Mancinelli",
"affiliation": "DIBRIS, University of Genoa, 9302 Genova, Genova, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giacomo",
"surname": "Nazzaro",
"fullName": "Giacomo Nazzaro",
"affiliation": "Computer Science, Sapienza University of Rome, Rome, RM, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fabio",
"surname": "Pellacini",
"fullName": "Fabio Pellacini",
"affiliation": "Computer Science, Sapienza University of Rome, Rome, RM, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enrico",
"surname": "Puppo",
"fullName": "Enrico Puppo",
"affiliation": "DIBRIS, University of Genoa, 9302 Genova, Genova, Italy, 16146",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2005/2397/0/23970419",
"title": "Curve Approximation with Quadratic B-Splines",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2005/23970419/12OmNBuL1bt",
"parentPublication": {
"id": "proceedings/iv/2005/2397/0",
"title": "Ninth International Conference on Information Visualisation (IV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2014/4284/0/4284a132",
"title": "A Geometric Interpolation Algorithm by Non-uniform Cubic B-Splines",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a132/12OmNCcKQj3",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2000/0868/0/08680184",
"title": "FEM-Based Dynamic Subdivision Splines",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680184/12OmNqFrGMq",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977413",
"title": "Piecewise-Bézier C1 Interpolation on Riemannian Manifolds with Application to 2D Shape Morphing",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977413/12OmNzYNN1E",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/03/v0326",
"title": "Generalized B-Spline Subdivision-Surface Wavelets for Geometry Compression",
"doi": null,
"abstractUrl": "/journal/tg/2004/03/v0326/13rRUwjGoLw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/09/07558244",
"title": "Knot Optimization for Biharmonic B-splines on Manifold Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07558244/13rRUxly95G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1997/03/v0228",
"title": "Scattered Data Interpolation with Multilevel B-Splines",
"doi": null,
"abstractUrl": "/journal/tg/1997/03/v0228/13rRUxly9dH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10066194",
"title": "Robust Coarse Cage Construction with Small Approximation Errors",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10066194/1LoWzFuMlMc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscc/2020/6503/0/650300a140",
"title": "Non-Polynomial Splines and Solving the Heat Equation",
"doi": null,
"abstractUrl": "/proceedings-article/cscc/2020/650300a140/1t2mTyJbknC",
"parentPublication": {
"id": "proceedings/cscc/2020/6503/0",
"title": "2020 24th International Conference on Circuits, Systems, Communications and Computers (CSCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscc/2020/6503/0/650300a126",
"title": "On approximation with the continuous polynomial cubic splines",
"doi": null,
"abstractUrl": "/proceedings-article/cscc/2020/650300a126/1t2mWq0wR3O",
"parentPublication": {
"id": "proceedings/cscc/2020/6503/0",
"title": "2020 24th International Conference on Circuits, Systems, Communications and Computers (CSCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09766081",
"articleId": "1D34HQ1zUNa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767703",
"articleId": "1D4MIOrP28M",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1D34HQ1zUNa",
"doi": "10.1109/TVCG.2022.3169980",
"abstract": "Learning physics is often difficult for students because concepts such as electricity, magnetism and sound, cannot be seen with the naked eye. Emerging technologies such as Augmented Reality (AR) can transform education by making challenging concepts visible and accessible to novices. We present a Hololens-based augmented reality system where collaborators learn about the invisible electromagnetism phenomena involved in audio speakers, and we measure the benefits of AR technology through quantitative and qualitative methods. Specifically, we measure learning (knowledge gains and transfer) and collaborative knowledge exchange behaviors. Our results indicate that, while AR generally provides a novelty effect, specific educational AR visualizations can be both beneficial and detrimental to learning — they helped students to learn spatial content and structural relationships, but hindered their understanding of kinesthetic content. Furthermore, AR facilitated learning in collaborations by providing representational common ground, which improved communication and peer teaching. We discuss these effects, as well as identify factors that have positive impact (e.g., co-located representations, easier access to resources, better grounding) or negative impact (e.g., tunnel vision, overlooking kinesthetic feedback) on student collaborative learning with augmented reality applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Learning physics is often difficult for students because concepts such as electricity, magnetism and sound, cannot be seen with the naked eye. Emerging technologies such as Augmented Reality (AR) can transform education by making challenging concepts visible and accessible to novices. We present a Hololens-based augmented reality system where collaborators learn about the invisible electromagnetism phenomena involved in audio speakers, and we measure the benefits of AR technology through quantitative and qualitative methods. Specifically, we measure learning (knowledge gains and transfer) and collaborative knowledge exchange behaviors. Our results indicate that, while AR generally provides a novelty effect, specific educational AR visualizations can be both beneficial and detrimental to learning — they helped students to learn spatial content and structural relationships, but hindered their understanding of kinesthetic content. Furthermore, AR facilitated learning in collaborations by providing representational common ground, which improved communication and peer teaching. We discuss these effects, as well as identify factors that have positive impact (e.g., co-located representations, easier access to resources, better grounding) or negative impact (e.g., tunnel vision, overlooking kinesthetic feedback) on student collaborative learning with augmented reality applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Learning physics is often difficult for students because concepts such as electricity, magnetism and sound, cannot be seen with the naked eye. Emerging technologies such as Augmented Reality (AR) can transform education by making challenging concepts visible and accessible to novices. We present a Hololens-based augmented reality system where collaborators learn about the invisible electromagnetism phenomena involved in audio speakers, and we measure the benefits of AR technology through quantitative and qualitative methods. Specifically, we measure learning (knowledge gains and transfer) and collaborative knowledge exchange behaviors. Our results indicate that, while AR generally provides a novelty effect, specific educational AR visualizations can be both beneficial and detrimental to learning — they helped students to learn spatial content and structural relationships, but hindered their understanding of kinesthetic content. Furthermore, AR facilitated learning in collaborations by providing representational common ground, which improved communication and peer teaching. We discuss these effects, as well as identify factors that have positive impact (e.g., co-located representations, easier access to resources, better grounding) or negative impact (e.g., tunnel vision, overlooking kinesthetic feedback) on student collaborative learning with augmented reality applications.",
"title": "How Augmented Reality (AR) Can Help and Hinder Collaborative Learning: A Study of AR in Electromagnetism Education",
"normalizedTitle": "How Augmented Reality (AR) Can Help and Hinder Collaborative Learning: A Study of AR in Electromagnetism Education",
"fno": "09766081",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Visualization",
"Collaborative Work",
"Collaboration",
"Magnetic Fields",
"Headphones",
"Education",
"Augmented Reality",
"Collaboration",
"Education",
"Makerspaces"
],
"authors": [
{
"givenName": "Iulian",
"surname": "Radu",
"fullName": "Iulian Radu",
"affiliation": "Graduate School of Education, Harvard University, 1812 Cambridge, Massachusetts, United States, 02138",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bertrand",
"surname": "Schneider",
"fullName": "Bertrand Schneider",
"affiliation": "Technology, Innovation, Education, Harvard Graduate School of Education, 80330 Cambridge, Massachusetts, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a107",
"title": "The Influence of using Augmented Reality on Textbook Support for Learners of Different Learning Styles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a107/12OmNBzAciw",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836510",
"title": "Integrating Building Information Modeling with Augmented Reality for Interdisciplinary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836510/12OmNCgrD16",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2015/7334/0/7334a132",
"title": "Augmented Reality Laboratory for High School Electrochemistry Course",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a132/12OmNqBbHAA",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2016/6119/0/6119a129",
"title": "Applying Augmented Reality Technology to E-Learning: Science Educational AR Products as an Example",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2016/6119a129/12OmNs4S8DX",
"parentPublication": {
"id": "proceedings/icebe/2016/6119/0",
"title": "2016 IEEE 13th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitcs/2014/6541/0/07021784",
"title": "JPEG-AR Standard Enabling Augmented Marketing",
"doi": null,
"abstractUrl": "/proceedings-article/icitcs/2014/07021784/12OmNxj239c",
"parentPublication": {
"id": "proceedings/icitcs/2014/6541/0",
"title": "2014 International Conference on IT Convergence and Security (ICITCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948402",
"title": "AR-IVI — Implementation of In-Vehicle Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948402/12OmNySosKY",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a389",
"title": "Implementation of Augmented Reality Globe in Teaching-Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a389/19wB38QGJS8",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797908",
"title": "Determining Design Requirements for AR Physics Education Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797908/1cJ11eG0SeA",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2020/6090/0/09155919",
"title": "Effects of Augmented Reality Assisted Learning Materials on Students’ Learning Outcomes",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2020/09155919/1m1j7NOETSg",
"parentPublication": {
"id": "proceedings/icalt/2020/6090/0",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a090",
"title": "First Steps Towards Augmented Reality Interactive Electronic Music Production",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a090/1tnWYWjfAFa",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09765476",
"articleId": "1CY3PmkyDMk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767783",
"articleId": "1D4MIotOemQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CY3PmkyDMk",
"doi": "10.1109/TVCG.2022.3169590",
"abstract": "The visualization of results while the simulation is running is increasingly common in extreme scale computing environments. We present a novel approach for in situ generation of image databases to achieve cost savings on supercomputers. Our approach, a hybrid between traditional inline and in transit techniques, dynamically distributes visualization tasks between simulation nodes and visualization nodes, using probing as a basis to estimate rendering cost. Our hybrid design differs from previous works in that it creates opportunities to minimize idle time from four fundamental types of inefficiency: variability, limited scalability, overhead, and rightsizing. We demonstrate our results by comparing our method against both inline and in transit methods for a variety of configurations, including two simulation codes and a scaling study that goes above 19K cores. Our findings show that our approach is superior in many configurations. As in situ visualization becomes increasingly ubiquitous, we believe our technique could lead to significant amounts of reclaimed cycles on supercomputers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The visualization of results while the simulation is running is increasingly common in extreme scale computing environments. We present a novel approach for in situ generation of image databases to achieve cost savings on supercomputers. Our approach, a hybrid between traditional inline and in transit techniques, dynamically distributes visualization tasks between simulation nodes and visualization nodes, using probing as a basis to estimate rendering cost. Our hybrid design differs from previous works in that it creates opportunities to minimize idle time from four fundamental types of inefficiency: variability, limited scalability, overhead, and rightsizing. We demonstrate our results by comparing our method against both inline and in transit methods for a variety of configurations, including two simulation codes and a scaling study that goes above 19K cores. Our findings show that our approach is superior in many configurations. As in situ visualization becomes increasingly ubiquitous, we believe our technique could lead to significant amounts of reclaimed cycles on supercomputers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The visualization of results while the simulation is running is increasingly common in extreme scale computing environments. We present a novel approach for in situ generation of image databases to achieve cost savings on supercomputers. Our approach, a hybrid between traditional inline and in transit techniques, dynamically distributes visualization tasks between simulation nodes and visualization nodes, using probing as a basis to estimate rendering cost. Our hybrid design differs from previous works in that it creates opportunities to minimize idle time from four fundamental types of inefficiency: variability, limited scalability, overhead, and rightsizing. We demonstrate our results by comparing our method against both inline and in transit methods for a variety of configurations, including two simulation codes and a scaling study that goes above 19K cores. Our findings show that our approach is superior in many configurations. As in situ visualization becomes increasingly ubiquitous, we believe our technique could lead to significant amounts of reclaimed cycles on supercomputers.",
"title": "A Hybrid In Situ Approach for Cost Efficient Image Database Generation",
"normalizedTitle": "A Hybrid In Situ Approach for Cost Efficient Image Database Generation",
"fno": "09765476",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Task Analysis",
"Costs",
"Data Models",
"Supercomputers",
"Computational Modeling",
"Scalability",
"Visualization",
"High Performance Computing",
"In Situ"
],
"authors": [
{
"givenName": "Valentin",
"surname": "Bruder",
"fullName": "Valentin Bruder",
"affiliation": "VISUS, Universität Stuttgart, 9149 Stuttgart, Baden-Württemberg, Germany, 70569",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew",
"surname": "Larsen",
"fullName": "Matthew Larsen",
"affiliation": "Lawrence Livermore National Laboratory, 4578 Livermore, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Ertl",
"fullName": "Thomas Ertl",
"affiliation": "Institut fuer Visualisierung und Interaktive Systeme, Universitaet Stuttgart, Stuttgart, BW, Germany, 70569",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hank",
"surname": "Childs",
"fullName": "Hank Childs",
"affiliation": "Computer and Information Science, University of Oregon, Eugene, Oregon, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steffen",
"surname": "Frey",
"fullName": "Steffen Frey",
"affiliation": "Bernoulli Institute of Mathematics, Computer Science and Artificial Intelligence, University of Groningen, 3647 Groningen, Groningen, Netherlands, 9700 AB",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/2012/0806/0/1000a089",
"title": "Combining in-situ and in-transit processing to enable extreme-scale scientific analysis",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2012/1000a089/12OmNAY79dw",
"parentPublication": {
"id": "proceedings/sc/2012/0806/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2014/5666/0/07004275",
"title": "In-situ visualization and computational steering for large-scale simulation of turbulent flows in complex geometries",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004275/12OmNrMHOiY",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2016/3682/0/3682b014",
"title": "High Performance Molecular Visualization: In-Situ and Parallel Rendering with EGL",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2016/3682b014/12OmNxaNGq2",
"parentPublication": {
"id": "proceedings/ipdpsw/2016/3682/0",
"title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2016/8815/0/8815a276",
"title": "Performance Modeling of In Situ Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2016/8815a276/12OmNyTfg9O",
"parentPublication": {
"id": "proceedings/sc/2016/8815/0",
"title": "SC16: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/01/mcg2018010119",
"title": "Optimizing Scientist Time through In Situ Visualization and Analysis",
"doi": null,
"abstractUrl": "/magazine/cg/2018/01/mcg2018010119/13rRUwciPhU",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2022/5444/0/544400a277",
"title": "SERVIZ: A Shared In Situ Visualization Service",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2022/544400a277/1I0bSRo24JW",
"parentPublication": {
"id": "proceedings/sc/2022/5444/0/",
"title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a182",
"title": "SIM-SITU: A Framework for the Faithful Simulation of in situ Processing",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a182/1J6hxD8UJYA",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2022/5444/0/544400a277",
"title": "SERVIZ: A Shared In Situ Visualization Service",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2022/544400a277/1L07qOrSuAg",
"parentPublication": {
"id": "proceedings/sc/2022/5444/0/",
"title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/escience/2019/2451/0/245100a188",
"title": "Characterizing In Situ and In Transit Analytics of Molecular Dynamics Simulations for Next-Generation Supercomputers",
"doi": null,
"abstractUrl": "/proceedings-article/escience/2019/245100a188/1ike1Fvh6Te",
"parentPublication": {
"id": "proceedings/escience/2019/2451/0",
"title": "2019 15th International Conference on eScience (eScience)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdpsw/2021/3577/0/357700a960",
"title": "Facilitating Staging-based Unstructured Mesh Processing to Support Hybrid In-Situ Workflows",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2021/357700a960/1uHgGMv8VsA",
"parentPublication": {
"id": "proceedings/ipdpsw/2021/3577/0",
"title": "2021 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09765706",
"articleId": "1CY3PcYVKaA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09766081",
"articleId": "1D34HQ1zUNa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1D34I6jOiIM",
"name": "ttg555501-09765476s1-supp1-3169590.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09765476s1-supp1-3169590.pdf",
"extension": "pdf",
"size": "8.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CY3PcYVKaA",
"doi": "10.1109/TVCG.2022.3171443",
"abstract": "Environmental sensors provide crucial data for understanding our surroundings. For example, air quality maps based on sensor readings help users make decisions to mitigate the effects of pollution on their health. Standard maps show readings from individual sensors or colored contours indicating estimated pollution levels. However, showing a single estimate may conceal uncertainty and lead to underestimation of risk, while showing sensor data yields varied interpretations. We present several visualizations of uncertainty in air quality maps, including a frequency-framing ‘dotmap’ and small multiples, and we compare them with standard contour and sensor-based maps. In a user study, we find that including uncertainty in maps has a significant effect on how much users would choose to reduce physical activity, and that people make more cautious decisions when using uncertainty-aware maps. Additionally, we analyze think-aloud transcriptions from the experiment to understand more about how the representation of uncertainty influences people's decision-making. Our results suggest ways to design maps of sensor data that can encourage certain types of reasoning, yield more consistent responses, and convey risk better than standard maps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Environmental sensors provide crucial data for understanding our surroundings. For example, air quality maps based on sensor readings help users make decisions to mitigate the effects of pollution on their health. Standard maps show readings from individual sensors or colored contours indicating estimated pollution levels. However, showing a single estimate may conceal uncertainty and lead to underestimation of risk, while showing sensor data yields varied interpretations. We present several visualizations of uncertainty in air quality maps, including a frequency-framing ‘dotmap’ and small multiples, and we compare them with standard contour and sensor-based maps. In a user study, we find that including uncertainty in maps has a significant effect on how much users would choose to reduce physical activity, and that people make more cautious decisions when using uncertainty-aware maps. Additionally, we analyze think-aloud transcriptions from the experiment to understand more about how the representation of uncertainty influences people's decision-making. Our results suggest ways to design maps of sensor data that can encourage certain types of reasoning, yield more consistent responses, and convey risk better than standard maps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Environmental sensors provide crucial data for understanding our surroundings. For example, air quality maps based on sensor readings help users make decisions to mitigate the effects of pollution on their health. Standard maps show readings from individual sensors or colored contours indicating estimated pollution levels. However, showing a single estimate may conceal uncertainty and lead to underestimation of risk, while showing sensor data yields varied interpretations. We present several visualizations of uncertainty in air quality maps, including a frequency-framing ‘dotmap’ and small multiples, and we compare them with standard contour and sensor-based maps. In a user study, we find that including uncertainty in maps has a significant effect on how much users would choose to reduce physical activity, and that people make more cautious decisions when using uncertainty-aware maps. Additionally, we analyze think-aloud transcriptions from the experiment to understand more about how the representation of uncertainty influences people's decision-making. Our results suggest ways to design maps of sensor data that can encourage certain types of reasoning, yield more consistent responses, and convey risk better than standard maps.",
"title": "Communicating Uncertainty and Risk in Air Quality Maps",
"normalizedTitle": "Communicating Uncertainty and Risk in Air Quality Maps",
"fno": "09765706",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sensors",
"Uncertainty",
"Air Pollution",
"Interpolation",
"Sensor Phenomena And Characterization",
"Standards",
"Image Color Analysis"
],
"authors": [
{
"givenName": "Annie",
"surname": "Preston",
"fullName": "Annie Preston",
"affiliation": "Computer Science, University of California Davis, Davis, California, United States, 95616",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "Computer Science, University of California at Davis, Davis, California, United States, 95616-8562",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icim/2009/3911/0/3911a037",
"title": "Gray Level Analysis Based on Gray Numerical Model of Air Pollution in Handan City",
"doi": null,
"abstractUrl": "/proceedings-article/icim/2009/3911a037/12OmNAXPy9k",
"parentPublication": {
"id": "proceedings/icim/2009/3911/0",
"title": "Innovation Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b546",
"title": "The Application of Zigbee Based Wireless Sensor Network and GIS in the Air Pollution Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b546/12OmNCw3zai",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn-workshops/2013/0540/0/06758498",
"title": "HazeWatch: A participatory sensor system for monitoring air pollution in Sydney",
"doi": null,
"abstractUrl": "/proceedings-article/lcn-workshops/2013/06758498/12OmNrJAed6",
"parentPublication": {
"id": "proceedings/lcn-workshops/2013/0540/0",
"title": "2013 IEEE 38th Conference on Local Computer Networks Workshops (LCN Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2016/5910/0/07836819",
"title": "airVLC: An Application for Visualizing Wind-Sensitive Interpolation of Urban Air Pollution Forecasts",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2016/07836819/12OmNrkBwCc",
"parentPublication": {
"id": "proceedings/icdmw/2016/5910/0",
"title": "2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cesce/2010/3972/1/3972a321",
"title": "Study on Gray Numerical Model of Air Pollution in Wuan City",
"doi": null,
"abstractUrl": "/proceedings-article/cesce/2010/3972a321/12OmNxZBSzi",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcoss/2015/8856/0/8856a011",
"title": "High Resolution Air Pollution Maps in Urban Environments Using Mobile Sensor Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dcoss/2015/8856a011/12OmNxeutbh",
"parentPublication": {
"id": "proceedings/dcoss/2015/8856/0",
"title": "2015 International Conference on Distributed Computing in Sensor Systems (DCOSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2011/4461/0/4461a578",
"title": "Visualization of Urban Air Pollution with Cloud Computing",
"doi": null,
"abstractUrl": "/proceedings-article/services/2011/4461a578/12OmNzvhvMo",
"parentPublication": {
"id": "proceedings/services/2011/4461/0",
"title": "2011 IEEE World Congress on Services",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2019/04/08750870",
"title": "On the Deployment of Wireless Sensor Networks for Air Quality Mapping: Optimization Models and Algorithms",
"doi": null,
"abstractUrl": "/journal/nt/2019/04/08750870/1bemf5zUDV6",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2020/6034/0/603400a055",
"title": "Spatiotemporal Deep Learning Model for Citywide Air Pollution Interpolation and Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2020/603400a055/1jdDAeLv8FW",
"parentPublication": {
"id": "proceedings/bigcomp/2020/6034/0",
"title": "2020 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iotdi/2020/6602/0/660200a014",
"title": "MapTransfer : Urban Air Quality Map Generation for Downscaled Sensor Deployments",
"doi": null,
"abstractUrl": "/proceedings-article/iotdi/2020/660200a014/1k0P4IYqeZi",
"parentPublication": {
"id": "proceedings/iotdi/2020/6602/0",
"title": "2020 IEEE/ACM Fifth International Conference on Internet-of-Things Design and Implementation (IoTDI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09765327",
"articleId": "1CWoKyrHUze",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09765476",
"articleId": "1CY3PmkyDMk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1D34HAQk3te",
"name": "ttg555501-09765706s1-supp1-3171443.png",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09765706s1-supp1-3171443.png",
"extension": "png",
"size": "14.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CWoKyrHUze",
"doi": "10.1109/TVCG.2022.3170531",
"abstract": "An exemplar is an entity that represents a desirable instance in a multi-attribute configuration space. It offers certain strengths in some of its attributes without unduly compromising the strengths in other attributes. Exemplars are frequently sought after in real life applications, such as systems engineering, investment banking, drug advisory, product marketing and many others. We study a specific method for the visualization of multi-attribute configuration spaces, the Data Context Map (DCM), for its capacity in enabling users to identify proper exemplars. The DCM produces a 2D embedding where users can view the data objects in the context of the data attributes. We ask whether certain graphical enhancements can aid users to gain a better understanding of the attribute-wise tradeoffs and so select better exemplar sets. We conducted several user studies for three different graphical designs, namely iso-contour, value-shaded topographic rendering and terrain topographic rendering, and compare these with a baseline DCM display. As a benchmark we use an exemplar set generated via Pareto optimization which has similar goals but unlike humans can operate in the native high-dimensional data space. Our study finds that the two topographic maps are statistically superior to both the iso-contour and the DCM baseline display.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An exemplar is an entity that represents a desirable instance in a multi-attribute configuration space. It offers certain strengths in some of its attributes without unduly compromising the strengths in other attributes. Exemplars are frequently sought after in real life applications, such as systems engineering, investment banking, drug advisory, product marketing and many others. We study a specific method for the visualization of multi-attribute configuration spaces, the Data Context Map (DCM), for its capacity in enabling users to identify proper exemplars. The DCM produces a 2D embedding where users can view the data objects in the context of the data attributes. We ask whether certain graphical enhancements can aid users to gain a better understanding of the attribute-wise tradeoffs and so select better exemplar sets. We conducted several user studies for three different graphical designs, namely iso-contour, value-shaded topographic rendering and terrain topographic rendering, and compare these with a baseline DCM display. As a benchmark we use an exemplar set generated via Pareto optimization which has similar goals but unlike humans can operate in the native high-dimensional data space. Our study finds that the two topographic maps are statistically superior to both the iso-contour and the DCM baseline display.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An exemplar is an entity that represents a desirable instance in a multi-attribute configuration space. It offers certain strengths in some of its attributes without unduly compromising the strengths in other attributes. Exemplars are frequently sought after in real life applications, such as systems engineering, investment banking, drug advisory, product marketing and many others. We study a specific method for the visualization of multi-attribute configuration spaces, the Data Context Map (DCM), for its capacity in enabling users to identify proper exemplars. The DCM produces a 2D embedding where users can view the data objects in the context of the data attributes. We ask whether certain graphical enhancements can aid users to gain a better understanding of the attribute-wise tradeoffs and so select better exemplar sets. We conducted several user studies for three different graphical designs, namely iso-contour, value-shaded topographic rendering and terrain topographic rendering, and compare these with a baseline DCM display. As a benchmark we use an exemplar set generated via Pareto optimization which has similar goals but unlike humans can operate in the native high-dimensional data space. Our study finds that the two topographic maps are statistically superior to both the iso-contour and the DCM baseline display.",
"title": "Graphical Enhancements for Effective Exemplar Identification in Contextual Data Visualizations",
"normalizedTitle": "Graphical Enhancements for Effective Exemplar Identification in Contextual Data Visualizations",
"fno": "09765327",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Task Analysis",
"Visualization",
"Optimization",
"Layout",
"Location Awareness",
"Pareto Optimization",
"High Dimensional Data",
"Multivariate Data",
"Contextual Displays",
"Exemplar Generation",
"Decision Support",
"Configuration Space"
],
"authors": [
{
"givenName": "Xinyu",
"surname": "Zhang",
"fullName": "Xinyu Zhang",
"affiliation": "Computer Science, Stony Brook University College of Engineering and Applied Sciences, 189653 Stony Brook, New York, United States, 11794-2200",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shenghui",
"surname": "Cheng",
"fullName": "Shenghui Cheng",
"affiliation": "School of Engineering, Westlake University, 557712 Hangzhou, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Computer Science, State University of New York at Stony Brook, Stony Brook, New York, United States, 11794-4400",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cis/2014/7434/0/7434a090",
"title": "An Exemplar-Based Hidden Markov Model with Discriminative Visual Features for Lipreading",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2014/7434a090/12OmNs0TKTO",
"parentPublication": {
"id": "proceedings/cis/2014/7434/0",
"title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b994",
"title": "Visual Phrases for Exemplar Face Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b994/12OmNxeM46N",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a146",
"title": "Per-Exemplar Fusion Learning for Video Retrieval and Recounting",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a146/12OmNyo1o5y",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b843",
"title": "Efficient Boosted Exemplar-Based Face Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b843/12OmNzlD9EA",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851f723",
"title": "Exemplar-Driven Top-Down Saliency Detection via Deep Association",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f723/12OmNzy7uQH",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2021/04/08428472",
"title": "Mining Set of Interested Communities with Limited Exemplar Nodes for Network Based Services",
"doi": null,
"abstractUrl": "/journal/sc/2021/04/08428472/13rRUwcAqnF",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061161",
"title": "Exemplar-based Visualization of Large Document Corpus (InfoVis2009-1115)",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061161/13rRUwgQpDp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d900",
"title": "FeTrIL: Feature Translation for Exemplar-Free Class-Incremental Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d900/1KxVIpqNipO",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/05/09247442",
"title": "Self-Representation Based Unsupervised Exemplar Selection in a Union of Subspaces",
"doi": null,
"abstractUrl": "/journal/tp/2022/05/09247442/1osldrpucwg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0/148500b405",
"title": "Exemplar Loss for Siamese Network in Visual Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2020/148500b405/1ua4MzLcdHO",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2020/1485/0",
"title": "2020 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09764639",
"articleId": "1CUJRA8AsVy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09765706",
"articleId": "1CY3PcYVKaA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CUJRA8AsVy",
"doi": "10.1109/TVCG.2022.3170695",
"abstract": "Point cloud-based place recognition is a fundamental part of the localization task, and it can be achieved through a retrieval process. Reranking is a critical step in improving the retrieval accuracy, yet little effort has been devoted to reranking in point cloud retrieval. In this paper, we investigate the versatility of rigid registration in reranking the point cloud retrieval results. Specifically, after obtaining the initial retrieval list based on the global point cloud feature distance, we perform registration between the query and point clouds in the retrieval list. We propose an efficient strategy based on visual consistency to evaluate each registration with a registration score in an unsupervised manner. The final reranked list is computed by considering both the original global feature distance and the registration score. In addition, we find that the registration score between two point clouds can also be used as a pseudo label to judge whether they represent the same place. Thus, we can create a self-supervised training dataset when there is no ground truth of positional information. Moreover, we develop a new probability-based loss to obtain more discriminative descriptors. The proposed reranking approach and the probability-based loss can be easily applied to current point cloud retrieval baselines to improve the retrieval accuracy. Experiments on various benchmark datasets show that both the reranking registration method and probability-based loss can significantly improve the current state-of-the-art baselines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point cloud-based place recognition is a fundamental part of the localization task, and it can be achieved through a retrieval process. Reranking is a critical step in improving the retrieval accuracy, yet little effort has been devoted to reranking in point cloud retrieval. In this paper, we investigate the versatility of rigid registration in reranking the point cloud retrieval results. Specifically, after obtaining the initial retrieval list based on the global point cloud feature distance, we perform registration between the query and point clouds in the retrieval list. We propose an efficient strategy based on visual consistency to evaluate each registration with a registration score in an unsupervised manner. The final reranked list is computed by considering both the original global feature distance and the registration score. In addition, we find that the registration score between two point clouds can also be used as a pseudo label to judge whether they represent the same place. Thus, we can create a self-supervised training dataset when there is no ground truth of positional information. Moreover, we develop a new probability-based loss to obtain more discriminative descriptors. The proposed reranking approach and the probability-based loss can be easily applied to current point cloud retrieval baselines to improve the retrieval accuracy. Experiments on various benchmark datasets show that both the reranking registration method and probability-based loss can significantly improve the current state-of-the-art baselines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point cloud-based place recognition is a fundamental part of the localization task, and it can be achieved through a retrieval process. Reranking is a critical step in improving the retrieval accuracy, yet little effort has been devoted to reranking in point cloud retrieval. In this paper, we investigate the versatility of rigid registration in reranking the point cloud retrieval results. Specifically, after obtaining the initial retrieval list based on the global point cloud feature distance, we perform registration between the query and point clouds in the retrieval list. We propose an efficient strategy based on visual consistency to evaluate each registration with a registration score in an unsupervised manner. The final reranked list is computed by considering both the original global feature distance and the registration score. In addition, we find that the registration score between two point clouds can also be used as a pseudo label to judge whether they represent the same place. Thus, we can create a self-supervised training dataset when there is no ground truth of positional information. Moreover, we develop a new probability-based loss to obtain more discriminative descriptors. The proposed reranking approach and the probability-based loss can be easily applied to current point cloud retrieval baselines to improve the retrieval accuracy. Experiments on various benchmark datasets show that both the reranking registration method and probability-based loss can significantly improve the current state-of-the-art baselines.",
"title": "Rank-PointRetrieval: Reranking Point Cloud Retrieval via a Visually Consistent Registration Evaluation",
"normalizedTitle": "Rank-PointRetrieval: Reranking Point Cloud Retrieval via a Visually Consistent Registration Evaluation",
"fno": "09764639",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Point Cloud Compression",
"Feature Extraction",
"Three Dimensional Displays",
"Task Analysis",
"Cloud Computing",
"Training",
"Global Positioning System",
"Point Cloud",
"Point Cloud Retrieval",
"Place Recognition",
"Reranking Methods"
],
"authors": [
{
"givenName": "Wenxiao",
"surname": "Zhang",
"fullName": "Wenxiao Zhang",
"affiliation": "School of Computer Science, Wuhan University, 12390 Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huajian",
"surname": "Zhou",
"fullName": "Huajian Zhou",
"affiliation": "School of Computer Science, Wuhan University, 12390 Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhen",
"surname": "Dong",
"fullName": "Zhen Dong",
"affiliation": "State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, 12390 Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingan",
"surname": "Yan",
"fullName": "Qingan Yan",
"affiliation": "Big Data and Smart Supply Chain, InnoPeak Technology, Inc, Mountain View, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunxia",
"surname": "Xiao",
"fullName": "Chunxia Xiao",
"affiliation": "School of Computer, Wuhan University, Wuhan, HuBei, China, 430072",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2010/4270/0/4270a565",
"title": "An Improved ICP Algorithm for Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a565/12OmNx5YvkB",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a237",
"title": "Research on three-stage point cloud registration mode optimized by simulated annealing algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a237/1ANLCHhoo6Y",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f510",
"title": "Feature Interactive Representation for Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f510/1BmFkzf6HuM",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/citce/2021/2184/0/218400a001",
"title": "Improved Iterative Closest Point (ICP) Point Cloud Registration Algorithm based on Matching Point Pair Quadratic Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/citce/2021/218400a001/1BtfTK26ZHO",
"parentPublication": {
"id": "proceedings/citce/2021/2184/0",
"title": "2021 International Conference on Computer, Internet of Things and Control Engineering (CITCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a407",
"title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/insai/2021/0859/0/085900a136",
"title": "Registration of Point Clouds: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/insai/2021/085900a136/1CHwMbhCNQA",
"parentPublication": {
"id": "proceedings/insai/2021/0859/0",
"title": "2021 International Conference on Networking Systems of AI (INSAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09858925",
"title": "Cross-Attention-Based Feature Extraction Network for 3D Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09858925/1G9E5a08Kze",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a572",
"title": "SGPCR: Spherical Gaussian Point Cloud Representation and its Application to Object Registration and Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a572/1L8qEq48PHa",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10097640",
"title": "Sparse-to-Dense Matching Network for Large-scale LiDAR Point Cloud Registration",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10097640/1M9lILSRgL6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09445585",
"title": "Consistent Two-Flow Network for Tele-Registration of Point Clouds",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09445585/1u8lzpSvnxu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09762800",
"articleId": "1CRqVJ1OnTO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09765327",
"articleId": "1CWoKyrHUze",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CRqVJ1OnTO",
"doi": "10.1109/TVCG.2022.3169426",
"abstract": "We present an empirical evaluation of immersion and self-avatars as compared to desktop viewing in Virtual Reality (VR) for learning computer programming and computational thinking in middle school education using an educational VR simulation. Students were asked to programmatically choreograph dance performances for virtual characters within an educational desktop application we built earlier called Virtual Environment Interactions (VEnvI). As part of a middle school science class, 90 students from the 6th and 7th grades participated in our study. All students first visually programmed dance choreography for a virtual character they created in VEnvI on a laptop. Then, they viewed and interacted with the resulting dance performance in a between-subjects design in one of the three conditions. We compared and contrasted the benefits of embodied immersive virtual reality (EVR) viewing utilizing a head-mounted display with a body-scaled and gender-matched self-avatar, immersive virtual reality only (IVR) viewing, and desktop VR (NVR) viewing with VEnvI on pedagogical outcomes, programming performance, presence, and attitudes towards STEM and computational thinking. Results from a cognition questionnaire showed that, in the learning dimensions of Knowledge and Understanding (Bloom's taxonomy) as well as Multistructural (SOLO taxonomy), participants in EVR and IVR scored significantly higher than NVR. Also, participants in EVR scored significantly higher than IVR. We also discovered similar results in objective programming performance and presence scores in VEnvI. Furthermore, students' attitudes towards computer science, programming confidence, and impressions significantly improved to be the highest in EVR and then IVR as compared to NVR condition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an empirical evaluation of immersion and self-avatars as compared to desktop viewing in Virtual Reality (VR) for learning computer programming and computational thinking in middle school education using an educational VR simulation. Students were asked to programmatically choreograph dance performances for virtual characters within an educational desktop application we built earlier called Virtual Environment Interactions (VEnvI). As part of a middle school science class, 90 students from the 6th and 7th grades participated in our study. All students first visually programmed dance choreography for a virtual character they created in VEnvI on a laptop. Then, they viewed and interacted with the resulting dance performance in a between-subjects design in one of the three conditions. We compared and contrasted the benefits of embodied immersive virtual reality (EVR) viewing utilizing a head-mounted display with a body-scaled and gender-matched self-avatar, immersive virtual reality only (IVR) viewing, and desktop VR (NVR) viewing with VEnvI on pedagogical outcomes, programming performance, presence, and attitudes towards STEM and computational thinking. Results from a cognition questionnaire showed that, in the learning dimensions of Knowledge and Understanding (Bloom's taxonomy) as well as Multistructural (SOLO taxonomy), participants in EVR and IVR scored significantly higher than NVR. Also, participants in EVR scored significantly higher than IVR. We also discovered similar results in objective programming performance and presence scores in VEnvI. Furthermore, students' attitudes towards computer science, programming confidence, and impressions significantly improved to be the highest in EVR and then IVR as compared to NVR condition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an empirical evaluation of immersion and self-avatars as compared to desktop viewing in Virtual Reality (VR) for learning computer programming and computational thinking in middle school education using an educational VR simulation. Students were asked to programmatically choreograph dance performances for virtual characters within an educational desktop application we built earlier called Virtual Environment Interactions (VEnvI). As part of a middle school science class, 90 students from the 6th and 7th grades participated in our study. All students first visually programmed dance choreography for a virtual character they created in VEnvI on a laptop. Then, they viewed and interacted with the resulting dance performance in a between-subjects design in one of the three conditions. We compared and contrasted the benefits of embodied immersive virtual reality (EVR) viewing utilizing a head-mounted display with a body-scaled and gender-matched self-avatar, immersive virtual reality only (IVR) viewing, and desktop VR (NVR) viewing with VEnvI on pedagogical outcomes, programming performance, presence, and attitudes towards STEM and computational thinking. Results from a cognition questionnaire showed that, in the learning dimensions of Knowledge and Understanding (Bloom's taxonomy) as well as Multistructural (SOLO taxonomy), participants in EVR and IVR scored significantly higher than NVR. Also, participants in EVR scored significantly higher than IVR. We also discovered similar results in objective programming performance and presence scores in VEnvI. Furthermore, students' attitudes towards computer science, programming confidence, and impressions significantly improved to be the highest in EVR and then IVR as compared to NVR condition.",
"title": "How Immersion and Self-Avatars in VR Affect Learning Programming and Computational Thinking in Middle School Education",
"normalizedTitle": "How Immersion and Self-Avatars in VR Affect Learning Programming and Computational Thinking in Middle School Education",
"fno": "09762800",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Programming Profession",
"Cognition",
"Training",
"Task Analysis",
"STEM",
"Solid Modeling",
"Virtual Environments",
"Virtual Reality",
"Computer Science Education",
"Embodied Cognition",
"Self Avatars",
"Immersion",
"VR In Middle School Education"
],
"authors": [
{
"givenName": "Dhaval",
"surname": "Parmar",
"fullName": "Dhaval Parmar",
"affiliation": "Khoury College of Computer Sciences, Northeastern University, 1848 Boston, Massachusetts, United States, 02115-5005",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lorraine",
"surname": "Lin",
"fullName": "Lorraine Lin",
"affiliation": "School of Computing, Clemson University, 2545 Clemson, South Carolina, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nikeetha",
"surname": "Dsouza",
"fullName": "Nikeetha Dsouza",
"affiliation": "Office of the Vice Provost for Diversity and Inclusion, Indiana University Bloomington, 1771 Bloomington, Indiana, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sophie",
"surname": "Joerg",
"fullName": "Sophie Joerg",
"affiliation": "School of Computing, Clemson University, Clemson, South Carolina, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alison E",
"surname": "Leonard",
"fullName": "Alison E Leonard",
"affiliation": "Eugene T. Moore School of Education, Clemson University, 2545 Clemson, South Carolina, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shaundra B",
"surname": "Daily",
"fullName": "Shaundra B Daily",
"affiliation": "Department of Electrical & Computer Engineering and Computer Science, Duke University, 3065 Durham, North Carolina, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sabarish",
"surname": "Babu",
"fullName": "Sabarish Babu",
"affiliation": "Human Centered Computing, Clemson University, Clemson, South Carolina, United States, 29534",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/respect/2016/3419/0/07836179",
"title": "Can embodied interaction and virtual peer customization in a virtual programming environment enhance computational thinking?",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2016/07836179/12OmNA14Ai4",
"parentPublication": {
"id": "proceedings/respect/2016/3419/0",
"title": "2016 Research on Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223466",
"title": "Nested immersion: Describing and classifying augmented virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223466/12OmNAle6GI",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504696",
"title": "Programming moves: Design and evaluation of applying embodied interaction in virtual environments to enhance computational thinking in middle school students",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504696/12OmNzDNtwD",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142843",
"title": "Optical Sight Metaphor for Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142843/12OmNzdoMlq",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446046",
"title": "The Effect of Immersion on Emotional Responses to Film Viewing in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446046/13bd1gCd7Th",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08269807",
"title": "Saliency in VR: How Do People Explore Virtual Environments?",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08269807/13rRUxDqS8o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040691",
"title": "Perceptual Calibration for Immersive Display Environments",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040691/13rRUxlgy3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089451",
"title": "Learning in the Field: Comparison of Desktop, Immersive Virtual Reality, and Actual Field Trips for Place-Based STEM Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089451/1jIxeqcYfFS",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a267",
"title": "ScienceVR: A Virtual Reality Framework for STEM Education, Simulation and Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a267/1zxLsREup4Q",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09761750",
"articleId": "1CKMkYw5fa0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09764639",
"articleId": "1CUJRA8AsVy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CKMkLCKOSk",
"doi": "10.1109/TVCG.2022.3169222",
"abstract": "Body-centric locomotion allows users to control both movement speed and direction with body parts (e.g., head tilt, arm swing or torso lean) to navigate in virtual reality (VR). However, there is little research to systematically investigate the effects of body parts for speed and direction control on virtual locomotion by taking in account different transfer functions(L: linear function, P: power function, and CL: piecewise function with constant and linear function). Therefore, we conducted an experiment to evaluate the combinational effects of the three factors (body parts for direction control, body parts for speed control, and transfer functions) on virtual locomotion. Results showed that (1) the head outperformed the torso for movement direction control in task completion time and environmental collisions; (2) Arm-based speed control led to shorter traveled distances than both head and knee. Head-based speed control had fewer environmental collisions than knee; (3) Body-centric locomotion with CL function was faster but less accurate than both L and P functions. Task time significantly decreased from P, L to CL functions, while traveled distance and overshoot significantly increased from P, L to CL functions. L function was rated with the highest score of USE-S, -pragmatic and -hedonic; (4) Transfer function had a significant main effect on motion sickness: the participants felt more headache and nausea when performing locomotion with CL function. Our results provide implications for body-centric locomotion design in VR applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Body-centric locomotion allows users to control both movement speed and direction with body parts (e.g., head tilt, arm swing or torso lean) to navigate in virtual reality (VR). However, there is little research to systematically investigate the effects of body parts for speed and direction control on virtual locomotion by taking in account different transfer functions(L: linear function, P: power function, and CL: piecewise function with constant and linear function). Therefore, we conducted an experiment to evaluate the combinational effects of the three factors (body parts for direction control, body parts for speed control, and transfer functions) on virtual locomotion. Results showed that (1) the head outperformed the torso for movement direction control in task completion time and environmental collisions; (2) Arm-based speed control led to shorter traveled distances than both head and knee. Head-based speed control had fewer environmental collisions than knee; (3) Body-centric locomotion with CL function was faster but less accurate than both L and P functions. Task time significantly decreased from P, L to CL functions, while traveled distance and overshoot significantly increased from P, L to CL functions. L function was rated with the highest score of USE-S, -pragmatic and -hedonic; (4) Transfer function had a significant main effect on motion sickness: the participants felt more headache and nausea when performing locomotion with CL function. Our results provide implications for body-centric locomotion design in VR applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Body-centric locomotion allows users to control both movement speed and direction with body parts (e.g., head tilt, arm swing or torso lean) to navigate in virtual reality (VR). However, there is little research to systematically investigate the effects of body parts for speed and direction control on virtual locomotion by taking in account different transfer functions(L: linear function, P: power function, and CL: piecewise function with constant and linear function). Therefore, we conducted an experiment to evaluate the combinational effects of the three factors (body parts for direction control, body parts for speed control, and transfer functions) on virtual locomotion. Results showed that (1) the head outperformed the torso for movement direction control in task completion time and environmental collisions; (2) Arm-based speed control led to shorter traveled distances than both head and knee. Head-based speed control had fewer environmental collisions than knee; (3) Body-centric locomotion with CL function was faster but less accurate than both L and P functions. Task time significantly decreased from P, L to CL functions, while traveled distance and overshoot significantly increased from P, L to CL functions. L function was rated with the highest score of USE-S, -pragmatic and -hedonic; (4) Transfer function had a significant main effect on motion sickness: the participants felt more headache and nausea when performing locomotion with CL function. Our results provide implications for body-centric locomotion design in VR applications.",
"title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality",
"normalizedTitle": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality",
"fno": "09761724",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Transfer Functions",
"Torso",
"Task Analysis",
"Knee",
"Velocity Control",
"Navigation",
"Legged Locomotion",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"givenName": "Boyu",
"surname": "Gao",
"fullName": "Boyu Gao",
"affiliation": "College of Information Science and Technology/Cyber Security, Jinan University, 47885 Guangzhou, guangdong, China, 510632",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zijun",
"surname": "Mai",
"fullName": "Zijun Mai",
"affiliation": "College of Cyber Security, Jinan University, 47885 Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huawei",
"surname": "Tu",
"fullName": "Huawei Tu",
"affiliation": "Computer Science, La Trobe University, 2080 Melbourne, Victoria, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Henry",
"surname": "Duh",
"fullName": "Henry Duh",
"affiliation": "Department of Computer Science and Information Technology, La Trobe University, 2080 Melbourne, Victoria, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771353",
"title": "Expression of emotional states during locomotion based on canonical parameters",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771353/12OmNAqU4VX",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131725",
"title": "LazyNav: 3D ground navigation with non-critical body parts",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131725/12OmNBCqbId",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2013/0820/0/06632600",
"title": "Measuring the steps: Generating action transitions between locomotion behaviours",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2013/06632600/12OmNBNM8VZ",
"parentPublication": {
"id": "proceedings/cgames/2013/0820/0",
"title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d435",
"title": "Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d435/12OmNx8wTgF",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipsn/2008/3157/0/3157a555",
"title": "Locomotion Monitoring Using Body Sensor Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ipsn/2008/3157a555/12OmNxTEiS2",
"parentPublication": {
"id": "proceedings/ipsn/2008/3157/0",
"title": "2008 International Conference on Information Processing in Sensor Networks (IPSN 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798070",
"title": "User-Centered Extension of a Locomotion Typology: Movement-Related Sensory Feedback and Spatial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798070/1cJ18ja0QXC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a346",
"title": "Spring Stepper: A Seated VR Locomotion Controller",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a346/1oZBBswUSzK",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a391",
"title": "The Effectiveness of Locomotion Interfaces Depends on Self-Motion Cues, Environmental Cues, and the Individual",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a391/1tnXFgLAfSw",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a493",
"title": "Evaluation of Body-centric Locomotion with Different Transfer Functions in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a493/1tuBnu6n9jq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09760126",
"articleId": "1CHsCMvyfuw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09761750",
"articleId": "1CKMkYw5fa0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CKMkYw5fa0",
"doi": "10.1109/TVCG.2022.3169175",
"abstract": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"title": "GestureLens: Visual Analysis of Gestures in Presentation Videos",
"normalizedTitle": "GestureLens: Visual Analysis of Gestures in Presentation Videos",
"fno": "09761750",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Correlation",
"Training",
"Data Visualization",
"Public Speaking",
"Visual Analytics",
"Trajectory",
"Gesture",
"Hand Movements",
"Presentation Video Analysis",
"Visual Analysis"
],
"authors": [
{
"givenName": "Haipeng",
"surname": "Zeng",
"fullName": "Haipeng Zeng",
"affiliation": "School of Intelligent Systems Engineering, Sun Yat-Sen University, 26469 Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "CSE, HKUST, 58207 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "School of Information Systems, Singapore Management University, 54756 Singapore, Singapore, Singapore, 178902",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aoyu",
"surname": "Wu",
"fullName": "Aoyu Wu",
"affiliation": "Computer Science, The Hong Kong University of Science and Technology, Hong Kong, Kowloon, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ting-Chuen",
"surname": "Pong",
"fullName": "Ting-Chuen Pong",
"affiliation": "Computer Science & Engineering, Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong, HKG",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "The Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2003/7965/1/7965181",
"title": "Oscillatory gestures and discourse",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2003/7965181/12OmNAkWvem",
"parentPublication": {
"id": "proceedings/icme/2003/7965/1",
"title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543253",
"title": "Annotation and taxonomy of gestures in lecture videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543253/12OmNBNM8OJ",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252111100",
"title": "Visual Recognition of Similar Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252111100/12OmNBcShVa",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2006/0366/0/04036684",
"title": "Prediction-Based Gesture Detection in Lecture Videos by Combining Visual, Speech and Electronic Slides",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2006/04036684/12OmNCzsKG8",
"parentPublication": {
"id": "proceedings/icme/2006/0366/0",
"title": "2006 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a476",
"title": "Multichannel Attention Network for Analyzing Visual Behavior in Public Speaking",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a476/12OmNqEjhZu",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733a275",
"title": "Face Presentation Attack with Latex Masks in Multispectral Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a275/12OmNrY3LAh",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042260",
"title": "Hierarchical Segmentation of Presentation Videos through Visual and Text Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042260/12OmNzgwmSb",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2016/06/mex2016060082",
"title": "Multimodal Sentiment Intensity Analysis in Videos: Facial Gestures and Verbal Messages",
"doi": null,
"abstractUrl": "/magazine/ex/2016/06/mex2016060082/13rRUwgyOcc",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c145",
"title": "Recognizing American Sign Language Gestures from Within Continuous Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c145/17D45WrVg1p",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807235",
"title": "<italic>EmoCo</italic>: Visual Analysis of Emotion Coherence in Presentation Videos",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807235/1cG6m1AVG6c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09761724",
"articleId": "1CKMkLCKOSk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09762800",
"articleId": "1CRqVJ1OnTO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CHsCMvyfuw",
"doi": "10.1109/TVCG.2022.3167896",
"abstract": "Since 2016, we have witnessed the tremendous growth of artificial intelligence+visualization (AI+VIS) research. However, existing survey papers on AI+VIS focus on visual analytics and information visualization, not scientific visualization (SciVis). In this paper, we survey related deep learning (DL) works in SciVis, specifically in the direction of DL4SciVis: designing DL solutions for solving SciVis problems. To stay focused, we primarily consider works that handle scalar and vector field data but exclude mesh data. We classify and discuss these works along six dimensions: domain setting, research task, learning type, network architecture, loss function, and evaluation metric. The paper concludes with a discussion of the remaining gaps to fill along the discussed dimensions and the grand challenges we need to tackle as a community. This state-of-the-art survey guides SciVis researchers in gaining an overview of this emerging topic and points out future directions to grow this research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Since 2016, we have witnessed the tremendous growth of artificial intelligence+visualization (AI+VIS) research. However, existing survey papers on AI+VIS focus on visual analytics and information visualization, not scientific visualization (SciVis). In this paper, we survey related deep learning (DL) works in SciVis, specifically in the direction of DL4SciVis: designing DL solutions for solving SciVis problems. To stay focused, we primarily consider works that handle scalar and vector field data but exclude mesh data. We classify and discuss these works along six dimensions: domain setting, research task, learning type, network architecture, loss function, and evaluation metric. The paper concludes with a discussion of the remaining gaps to fill along the discussed dimensions and the grand challenges we need to tackle as a community. This state-of-the-art survey guides SciVis researchers in gaining an overview of this emerging topic and points out future directions to grow this research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Since 2016, we have witnessed the tremendous growth of artificial intelligence+visualization (AI+VIS) research. However, existing survey papers on AI+VIS focus on visual analytics and information visualization, not scientific visualization (SciVis). In this paper, we survey related deep learning (DL) works in SciVis, specifically in the direction of DL4SciVis: designing DL solutions for solving SciVis problems. To stay focused, we primarily consider works that handle scalar and vector field data but exclude mesh data. We classify and discuss these works along six dimensions: domain setting, research task, learning type, network architecture, loss function, and evaluation metric. The paper concludes with a discussion of the remaining gaps to fill along the discussed dimensions and the grand challenges we need to tackle as a community. This state-of-the-art survey guides SciVis researchers in gaining an overview of this emerging topic and points out future directions to grow this research.",
"title": "DL4SciVis: A State-of-the-Art Survey on Deep Learning for Scientific Visualization",
"normalizedTitle": "DL4SciVis: A State-of-the-Art Survey on Deep Learning for Scientific Visualization",
"fno": "09760126",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Generative Adversarial Networks",
"Neural Networks",
"Measurement",
"Deep Learning",
"Convolutional Neural Networks",
"Three Dimensional Displays",
"Scientific Visualization",
"Deep Learning",
"Survey"
],
"authors": [
{
"givenName": "Chaoli",
"surname": "Wang",
"fullName": "Chaoli Wang",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, Indiana, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Han",
"fullName": "Jun Han",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, Indiana, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/visual/2004/8788/0/01372246",
"title": "Panel 4: What Should We Teach in a Scientific Visualization Class?",
"doi": null,
"abstractUrl": "/proceedings-article/visual/2004/01372246/12OmNxj23hJ",
"parentPublication": {
"id": "proceedings/visual/2004/8788/0",
"title": "IEEE Visualization 2004",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rev/2009/4104/0/41040046",
"title": "Requirements Engineering Visualization: A Survey on the State-of-the-Art",
"doi": null,
"abstractUrl": "/proceedings-article/rev/2009/41040046/12OmNzIUfQV",
"parentPublication": {
"id": "proceedings/rev/2009/4104/0",
"title": "Requirements Engineering Visualization, First International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010001",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010001/13rRUEgarsI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/09/07583708",
"title": "Vispubdata.org: A Metadata Collection About IEEE Visualization (VIS) Publications",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07583708/13rRUxd2aZ7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a386",
"title": "A Survey of Research on Image Style Transfer Based on Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a386/1BzTXiDr6lW",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a104",
"title": "Visualization overview: Using modern text mining techniques to provide insight into visualization research practice",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a104/1KaFNxrDmY8",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823764",
"title": "Toward A Deep Understanding of What Makes a Scientific Visualization Memorable",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823764/1d5kxlHWMEg",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09356353",
"title": "Image Segmentation Using Deep Learning: A Survey",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09356353/1rigXK0s5Ak",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09523770",
"title": "A Survey on ML4VIS: Applying Machine Learning Advances to Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09523770/1wnLgd43B5K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09760161",
"articleId": "1CHsCvUiJQA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09761724",
"articleId": "1CKMkLCKOSk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CHsCvUiJQA",
"doi": "10.1109/TVCG.2022.3168190",
"abstract": "With advances in Virtual Reality (VR) technology, user expectation for a near-perfect experience is also increasing. The push for a wider field-of-view can increase the challenges of correcting lens distortion. Past studies on imperfect VR experiences have focused on motion sickness provoked by vection-inducing VR stimuli and discomfort due to mismatches in accommodation and binocular convergence. Disorientation and discomfort due to unintended optical flow induced by lens distortion, referred to as dynamic distortion (DD), has, to date, received little attention. This study examines and models the effects of DD during head rotations with various fixed gazes stabilized by vestibulo-ocular reflex (VOR). Increases in DD levels comparable to lens parameters from poorly designed commercial VR lenses significantly increase discomfort scores of viewers in relation to disorientation, dizziness, and eye strain. Cross-validated results indicate that the model is able to predict significant differences in subjective scores resulting from different commercial VR lenses and these predictions correlated with empirical data. The present work provides new insights to understand symptoms of discomfort in VR during user interactions with static world-locked / space-stabilized scenes and contributes to the design of discomfort-free VR headset lenses.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With advances in Virtual Reality (VR) technology, user expectation for a near-perfect experience is also increasing. The push for a wider field-of-view can increase the challenges of correcting lens distortion. Past studies on imperfect VR experiences have focused on motion sickness provoked by vection-inducing VR stimuli and discomfort due to mismatches in accommodation and binocular convergence. Disorientation and discomfort due to unintended optical flow induced by lens distortion, referred to as dynamic distortion (DD), has, to date, received little attention. This study examines and models the effects of DD during head rotations with various fixed gazes stabilized by vestibulo-ocular reflex (VOR). Increases in DD levels comparable to lens parameters from poorly designed commercial VR lenses significantly increase discomfort scores of viewers in relation to disorientation, dizziness, and eye strain. Cross-validated results indicate that the model is able to predict significant differences in subjective scores resulting from different commercial VR lenses and these predictions correlated with empirical data. The present work provides new insights to understand symptoms of discomfort in VR during user interactions with static world-locked / space-stabilized scenes and contributes to the design of discomfort-free VR headset lenses.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With advances in Virtual Reality (VR) technology, user expectation for a near-perfect experience is also increasing. The push for a wider field-of-view can increase the challenges of correcting lens distortion. Past studies on imperfect VR experiences have focused on motion sickness provoked by vection-inducing VR stimuli and discomfort due to mismatches in accommodation and binocular convergence. Disorientation and discomfort due to unintended optical flow induced by lens distortion, referred to as dynamic distortion (DD), has, to date, received little attention. This study examines and models the effects of DD during head rotations with various fixed gazes stabilized by vestibulo-ocular reflex (VOR). Increases in DD levels comparable to lens parameters from poorly designed commercial VR lenses significantly increase discomfort scores of viewers in relation to disorientation, dizziness, and eye strain. Cross-validated results indicate that the model is able to predict significant differences in subjective scores resulting from different commercial VR lenses and these predictions correlated with empirical data. The present work provides new insights to understand symptoms of discomfort in VR during user interactions with static world-locked / space-stabilized scenes and contributes to the design of discomfort-free VR headset lenses.",
"title": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes",
"normalizedTitle": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes",
"fno": "09760161",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lenses",
"Optical Distortion",
"Distortion",
"Pupils",
"Headphones",
"Visualization",
"Adaptive Optics",
"Virtual Reality",
"Lens Distortion",
"Visual Discomfort",
"Motion Sickness",
"Disorientation",
"Vestibulo Ocular Reflex"
],
"authors": [
{
"givenName": "Tsz Tai",
"surname": "Chan",
"fullName": "Tsz Tai Chan",
"affiliation": "IEDA, The Hong Kong University of Science and Technology, 58207 Hong Kong, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yixuan",
"surname": "Wang",
"fullName": "Yixuan Wang",
"affiliation": "CBE, The Hong Kong University of Science and Technology, 58207 Hong Kong, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Richard Hau Yue",
"surname": "So",
"fullName": "Richard Hau Yue So",
"affiliation": "IEDA and CBE, The Hong Kong University of Science and Technology, 58207 Hong Kong, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jerry",
"surname": "Jia",
"fullName": "Jerry Jia",
"affiliation": "Facebook Reality Laboratory, Facebook Inc, 342996 Menlo Park, California, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2015/6759/0/07301373",
"title": "Fresnel lens imaging with post-capture image processing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2015/07301373/12OmNBLdKJ7",
"parentPublication": {
"id": "proceedings/cvprw/2015/6759/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444782",
"title": "Single-pass 3D lens rendering and spatiotemporal \"Time Warp\" example",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444782/12OmNBO3JYm",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ised/2014/6965/0/6965a025",
"title": "Plasmonic Lens Based on Elliptically Tapered Metallic Nano Slits",
"doi": null,
"abstractUrl": "/proceedings-article/ised/2014/6965a025/12OmNwdtw8Y",
"parentPublication": {
"id": "proceedings/ised/2014/6965/0",
"title": "2014 Fifth International Symposium on Electronic System Design (ISED)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475054",
"title": "Automatic curve selection for lens distortion correction using Hough transform energy",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475054/12OmNzXFoJb",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873994",
"title": "The impacts of lens and stereo camera separation on perceived slant in Virtual Reality head-mounted displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873994/1GjwGUGyCuk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a628",
"title": "A Binocular Model to Evaluate User Experience in Ophthalmic and AR Prescription Lens Designs",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a628/1J7WmUiV2la",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798226",
"title": "A New 360 Camera Design for Multi Format VR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798226/1cJ0SS2Brk4",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798107",
"title": "Gaze-Dependent Distortion Correction for Thick Lenses in HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798107/1cJ12M9tKM0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09384477",
"title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09756929",
"articleId": "1Cxva6pb2iA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09760126",
"articleId": "1CHsCMvyfuw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Cxva6pb2iA",
"doi": "10.1109/TVCG.2022.3167151",
"abstract": "3D point clouds have found a wide variety of applications in multimedia processing, remote sensing, and scientific computing. Although most point cloud processing systems are developed to improve viewer experiences, little work has been dedicated to perceptual quality assessment of 3D point clouds. In this work, we build a new 3D point cloud database, namely the Waterloo Point Cloud (WPC) database. In contrast to existing datasets consisting of small-scale and low-quality source content of constrained viewing angles, the WPC database contains 20 high quality, realistic, and omni-directional source point clouds and 740 diversely distorted point clouds. We carry out a subjective quality assessment experiment over the database in a controlled lab environment. Our statistical analysis suggests that existing objective point cloud quality assessment (PCQA) models only achieve limited success in predicting subjective quality ratings. We propose a novel objective PCQA model based on an attention mechanism and a variant of information content-weighted structural similarity, which significantly outperforms existing PCQA models. The database has been made publicly available at https://github.com/qdushl/Waterloo-Point-Cloud-Database.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D point clouds have found a wide variety of applications in multimedia processing, remote sensing, and scientific computing. Although most point cloud processing systems are developed to improve viewer experiences, little work has been dedicated to perceptual quality assessment of 3D point clouds. In this work, we build a new 3D point cloud database, namely the Waterloo Point Cloud (WPC) database. In contrast to existing datasets consisting of small-scale and low-quality source content of constrained viewing angles, the WPC database contains 20 high quality, realistic, and omni-directional source point clouds and 740 diversely distorted point clouds. We carry out a subjective quality assessment experiment over the database in a controlled lab environment. Our statistical analysis suggests that existing objective point cloud quality assessment (PCQA) models only achieve limited success in predicting subjective quality ratings. We propose a novel objective PCQA model based on an attention mechanism and a variant of information content-weighted structural similarity, which significantly outperforms existing PCQA models. The database has been made publicly available at https://github.com/qdushl/Waterloo-Point-Cloud-Database.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D point clouds have found a wide variety of applications in multimedia processing, remote sensing, and scientific computing. Although most point cloud processing systems are developed to improve viewer experiences, little work has been dedicated to perceptual quality assessment of 3D point clouds. In this work, we build a new 3D point cloud database, namely the Waterloo Point Cloud (WPC) database. In contrast to existing datasets consisting of small-scale and low-quality source content of constrained viewing angles, the WPC database contains 20 high quality, realistic, and omni-directional source point clouds and 740 diversely distorted point clouds. We carry out a subjective quality assessment experiment over the database in a controlled lab environment. Our statistical analysis suggests that existing objective point cloud quality assessment (PCQA) models only achieve limited success in predicting subjective quality ratings. We propose a novel objective PCQA model based on an attention mechanism and a variant of information content-weighted structural similarity, which significantly outperforms existing PCQA models. The database has been made publicly available at https://github.com/qdushl/Waterloo-Point-Cloud-Database.",
"title": "Perceptual Quality Assessment of Colored 3D Point Clouds",
"normalizedTitle": "Perceptual Quality Assessment of Colored 3D Point Clouds",
"fno": "09756929",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Point Cloud Compression",
"Monitoring",
"Color",
"Databases",
"Colored Noise",
"Three Dimensional Displays",
"Quality Assessment",
"Point Cloud",
"Subjective Quality Assessment",
"Attention Model",
"Objective Quality Assessment"
],
"authors": [
{
"givenName": "Qi",
"surname": "Liu",
"fullName": "Qi Liu",
"affiliation": "College of Electronic Information, Qingdao University, 12593 Qingdao, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Honglei",
"surname": "Su",
"fullName": "Honglei Su",
"affiliation": "College of Electronic Information, Qingdao University, 12593 Qingdao, Shandong, China, 266071",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhengfang",
"surname": "Duanmu",
"fullName": "Zhengfang Duanmu",
"affiliation": "Electrical and Computer Engineering, University of Waterloo, Waterloo, Ontario, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wentao",
"surname": "Liu",
"fullName": "Wentao Liu",
"affiliation": "Electrical and Computer Engineering, University of Waterloo, Waterloo, Ontario, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhou",
"surname": "Wang",
"fullName": "Zhou Wang",
"affiliation": "Electrical and Computer Engineering, University of Waterloo, Waterloo, Ontario, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460543",
"title": "Nonlocal processing of 3D colored point clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460543/12OmNvAiSJd",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200a895",
"title": "Walk in the Cloud: Learning Curves for Point Clouds Shape Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200a895/1BmEBcBzATC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/insai/2021/0859/0/085900a136",
"title": "Registration of Point Clouds: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/insai/2021/085900a136/1CHwMbhCNQA",
"parentPublication": {
"id": "proceedings/insai/2021/0859/0",
"title": "2021 International Conference on Networking Systems of AI (INSAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09788570",
"title": "Hypergraph Representation for Detecting 3D Objects from Noisy Point Clouds",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09788570/1DU9toirrLG",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859723",
"title": "Deep Geometry Post-Processing for Decompressed Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859723/1G9DFQXOSME",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g646",
"title": "Multimodal Colored Point Cloud to Image Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g646/1H0KN0OXgGc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600v1147",
"title": "No-Reference Point Cloud Quality Assessment via Domain Adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600v1147/1H0LihR7AqI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600o4799",
"title": "3DAC: Learning Attribute Compression for Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600o4799/1H0LpmYzOeI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b196",
"title": "Centroid Distance Keypoint Detector for Colored Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b196/1LiO8nAtFok",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a616",
"title": "3D point cloud quality assessment method using Mahalanobis distance",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a616/1MeoL090t9e",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09756299",
"articleId": "1CvQiJgja2k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09760161",
"articleId": "1CHsCvUiJQA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CvQiJgja2k",
"doi": "10.1109/TVCG.2022.3166666",
"abstract": "Parametric face models, like morphable and blendshape models, have shown great potential in face representation, reconstruction, and animation. However, all these models focus on large-scale facial geometry. Facial details like wrinkles are not parameterized in these models, impeding their accuracy and realism. In this paper, we propose a method to learn a Semantically Disentangled Variational Autoencoder (SDVAE) to parameterize facial details and support independent detail manipulation as an extension of an off-the-shelf large-scale face model. Our method utilizes the non-linear capability of Deep Neural Networks for detail modeling, achieving better accuracy and greater representation power compared with linear models. In order to disentangle the semantic factors of identity, expression and age, we propose to eliminate the correlation between different factors in an adversarial manner. Therefore, wrinkle-level details of various identities, expressions, and ages can be generated and independently controlled by changing latent vectors of our SDVAE. We further leverage our model to reconstruct 3D faces via fitting to facial scans and images. Benefiting from our parametric model, we achieve accurate and robust reconstruction, and the reconstructed details can be easily animated and manipulated. We evaluate our method on practical applications, including scan fitting, image fitting, video tracking, model manipulation, and expression and age animation. Extensive experiments demonstrate that the proposed method can robustly model facial details and achieve better results than alternative methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Parametric face models, like morphable and blendshape models, have shown great potential in face representation, reconstruction, and animation. However, all these models focus on large-scale facial geometry. Facial details like wrinkles are not parameterized in these models, impeding their accuracy and realism. In this paper, we propose a method to learn a Semantically Disentangled Variational Autoencoder (SDVAE) to parameterize facial details and support independent detail manipulation as an extension of an off-the-shelf large-scale face model. Our method utilizes the non-linear capability of Deep Neural Networks for detail modeling, achieving better accuracy and greater representation power compared with linear models. In order to disentangle the semantic factors of identity, expression and age, we propose to eliminate the correlation between different factors in an adversarial manner. Therefore, wrinkle-level details of various identities, expressions, and ages can be generated and independently controlled by changing latent vectors of our SDVAE. We further leverage our model to reconstruct 3D faces via fitting to facial scans and images. Benefiting from our parametric model, we achieve accurate and robust reconstruction, and the reconstructed details can be easily animated and manipulated. We evaluate our method on practical applications, including scan fitting, image fitting, video tracking, model manipulation, and expression and age animation. Extensive experiments demonstrate that the proposed method can robustly model facial details and achieve better results than alternative methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Parametric face models, like morphable and blendshape models, have shown great potential in face representation, reconstruction, and animation. However, all these models focus on large-scale facial geometry. Facial details like wrinkles are not parameterized in these models, impeding their accuracy and realism. In this paper, we propose a method to learn a Semantically Disentangled Variational Autoencoder (SDVAE) to parameterize facial details and support independent detail manipulation as an extension of an off-the-shelf large-scale face model. Our method utilizes the non-linear capability of Deep Neural Networks for detail modeling, achieving better accuracy and greater representation power compared with linear models. In order to disentangle the semantic factors of identity, expression and age, we propose to eliminate the correlation between different factors in an adversarial manner. Therefore, wrinkle-level details of various identities, expressions, and ages can be generated and independently controlled by changing latent vectors of our SDVAE. We further leverage our model to reconstruct 3D faces via fitting to facial scans and images. Benefiting from our parametric model, we achieve accurate and robust reconstruction, and the reconstructed details can be easily animated and manipulated. We evaluate our method on practical applications, including scan fitting, image fitting, video tracking, model manipulation, and expression and age animation. Extensive experiments demonstrate that the proposed method can robustly model facial details and achieve better results than alternative methods.",
"title": "Semantically Disentangled Variational Autoencoder for Modeling 3D Facial Details",
"normalizedTitle": "Semantically Disentangled Variational Autoencoder for Modeling 3D Facial Details",
"fno": "09756299",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Solid Modeling",
"Three Dimensional Displays",
"Faces",
"Image Reconstruction",
"Geometry",
"Principal Component Analysis",
"Shape",
"Detail Reconstruction",
"Facial Animation",
"Semantic Disentanglement"
],
"authors": [
{
"givenName": "Jingwang",
"surname": "Ling",
"fullName": "Jingwang Ling",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, 100084",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhibo",
"surname": "Wang",
"fullName": "Zhibo Wang",
"affiliation": "The Institute of CG&CAD, Tsinghua University, 12442 Beijing, Beijing, China, 100084",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ming",
"surname": "Lu",
"fullName": "Ming Lu",
"affiliation": "VAIL, Intel Labs China, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Quan",
"surname": "Wang",
"fullName": "Quan Wang",
"affiliation": "SenseTime Research, SenseTime Group, 602673 Hong Kong, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Qian",
"fullName": "Chen Qian",
"affiliation": "Research Deparment, SenseTime Group Limited, Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Feng",
"surname": "Xu",
"fullName": "Feng Xu",
"affiliation": "School of Software, CG&CAD, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926714",
"title": "Deep Feature Consistent Variational Autoencoder",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926714/12OmNAmVH7L",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b031",
"title": "Large Pose 3D Face Reconstruction from a Single Image via Direct Volumetric CNN Regression",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b031/12OmNCd2rI2",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a009",
"title": "Improving 3D Face Details Based on Normal Map of Hetero-source Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a009/12OmNCmpcLR",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3870",
"title": "VariTex: Variational Neural Face Textures",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3870/1BmGhEwLQL6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042668",
"title": "DisVAE: Disentangled Variational Autoencoder for High-Quality Facial Expression Features",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042668/1KOv1LhvlYI",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10061279",
"title": "ReenactArtFace: Artistic Face Image Reenactment",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10061279/1LiKMy3pdDO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j428",
"title": "Photo-Realistic Facial Details Synthesis From Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j428/1hVlh0SqiPe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h917",
"title": "Guided Variational Autoencoder for Disentanglement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h917/1m3oiUnuaIM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378242",
"title": "FSRGAN-DB: Super-resolution Reconstruction Based on Facial Prior Knowledge",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378242/1s64JgJ8t32",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09606538",
"title": "AvatarMe<sup>++</sup>: Facial Shape and BRDF Inference With Photorealistic Rendering-Aware GANs",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09606538/1ymEN8wBXRC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09754991",
"articleId": "1CubHSuE8bm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09756929",
"articleId": "1Cxva6pb2iA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CAPfQoWmxW",
"name": "ttg555501-09756299s1-supp1-3166666.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09756299s1-supp1-3166666.mp4",
"extension": "mp4",
"size": "19.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CubHSuE8bm",
"doi": "10.1109/TVCG.2022.3166159",
"abstract": "In this paper, we present OrthoAligner, a novel method to predict the visual outcome of orthodontic treatment in a portrait image. Unlike the state-of-the-art method, which relies on a 3D teeth model obtained from dental scanning, our method generates realistic alignment effects in images without requiring additional 3D information as input and thus making our system readily available to average users. The key of our approach is to employ the 3D geometric information encoded in an unsupervised generative model, i.e., StyleGAN in this paper. Instead of directly conducting translation in the image space, we embed the teeth region extracted from a given portrait to the latent space of the StyleGAN generator and propose a novel {latent} editing method to discover a geometrically meaningful editing path that yields the alignment process in the image space. To blend the edited mouth region with the original portrait image, we further introduce a BlendingNet to remove boundary artifacts and correct color inconsistency. We also extend our method to short video clips by propagating the alignment effects across neighboring frames. We evaluate our method in various orthodontic cases, compare it to the state-of-the-art and competitive baselines, and validate the effectiveness of each component.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present OrthoAligner, a novel method to predict the visual outcome of orthodontic treatment in a portrait image. Unlike the state-of-the-art method, which relies on a 3D teeth model obtained from dental scanning, our method generates realistic alignment effects in images without requiring additional 3D information as input and thus making our system readily available to average users. The key of our approach is to employ the 3D geometric information encoded in an unsupervised generative model, i.e., StyleGAN in this paper. Instead of directly conducting translation in the image space, we embed the teeth region extracted from a given portrait to the latent space of the StyleGAN generator and propose a novel {latent} editing method to discover a geometrically meaningful editing path that yields the alignment process in the image space. To blend the edited mouth region with the original portrait image, we further introduce a BlendingNet to remove boundary artifacts and correct color inconsistency. We also extend our method to short video clips by propagating the alignment effects across neighboring frames. We evaluate our method in various orthodontic cases, compare it to the state-of-the-art and competitive baselines, and validate the effectiveness of each component.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present OrthoAligner, a novel method to predict the visual outcome of orthodontic treatment in a portrait image. Unlike the state-of-the-art method, which relies on a 3D teeth model obtained from dental scanning, our method generates realistic alignment effects in images without requiring additional 3D information as input and thus making our system readily available to average users. The key of our approach is to employ the 3D geometric information encoded in an unsupervised generative model, i.e., StyleGAN in this paper. Instead of directly conducting translation in the image space, we embed the teeth region extracted from a given portrait to the latent space of the StyleGAN generator and propose a novel {latent} editing method to discover a geometrically meaningful editing path that yields the alignment process in the image space. To blend the edited mouth region with the original portrait image, we further introduce a BlendingNet to remove boundary artifacts and correct color inconsistency. We also extend our method to short video clips by propagating the alignment effects across neighboring frames. We evaluate our method in various orthodontic cases, compare it to the state-of-the-art and competitive baselines, and validate the effectiveness of each component.",
"title": "OrthoAligner: Image-based Teeth Alignment Prediction via Latent Style Manipulation",
"normalizedTitle": "OrthoAligner: Image-based Teeth Alignment Prediction via Latent Style Manipulation",
"fno": "09754991",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Teeth",
"Three Dimensional Displays",
"Image Reconstruction",
"Generative Adversarial Networks",
"Codes",
"Solid Modeling",
"Image Color Analysis",
"Teeth Alignment",
"GAN Inversion",
"Style GAN"
],
"authors": [
{
"givenName": "Beijia",
"surname": "Chen",
"fullName": "Beijia Chen",
"affiliation": "The State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, 53025 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Youyi",
"surname": "Zheng",
"fullName": "Youyi Zheng",
"affiliation": "Computer Science, Zhejiang University College of Computer Science and Technology, 366095 Hangzhou, Zhejiang, China, 310027",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccit/2008/3407/2/3407c967",
"title": "Performance Evaluation of Teeth Image Recognition System Based on Difference Image Entropy",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2008/3407c967/12OmNwE9OJC",
"parentPublication": {
"id": "proceedings/iccit/2008/3407/2",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aqtr/2006/0360/2/04022997",
"title": "Numerical Simulation of Periodontal Stress Distribution during Orthodontic Tipping of Single Rooted Teeth",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2006/04022997/12OmNzahc8g",
"parentPublication": {
"id": "proceedings/aqtr/2006/0360/2",
"title": "International Conference on Automation, Quality and Testing, Robotics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2012/4875/0/4875a145",
"title": "A New Approach to Teeth Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a145/12OmNzlUKwe",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aqtr/2006/0360/2/04022996",
"title": "Analogical Model of Periodontal Stress Distribution during Orthodontic Tipping of Single Rooted Teeth",
"doi": null,
"abstractUrl": "/proceedings-article/aqtr/2006/04022996/12OmNzt0IuX",
"parentPublication": {
"id": "proceedings/aqtr/2006/0360/2",
"title": "International Conference on Automation, Quality and Testing, Robotics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669660",
"title": "Analysis on Teeth Occlusion Distribution Based on Segmentation and Registration Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669660/1A9WeM2U9RC",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09933028",
"title": "Tooth Alignment Network Based on Landmark Constraints and Hierarchical Graph Structure",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09933028/1HVsnduN8e4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g141",
"title": "StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g141/1m3ng5xOC08",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4092",
"title": "Adversarial Latent Autoencoders",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4092/1m3okyROwx2",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300b611",
"title": "Shared-latent Variable Network Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300b611/1wLcwLrusUw",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2021/3960/0/396000a259",
"title": "Application of three-dimensional digital modeling of teeth and jaws in orthodontics teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2021/396000a259/1xqyHh0kEi4",
"parentPublication": {
"id": "proceedings/icceai/2021/3960/0",
"title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09754239",
"articleId": "1CpcE7ttZNC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09756299",
"articleId": "1CvQiJgja2k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CvQjWHlMfm",
"name": "ttg555501-09754991s1-supp1-3166159.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09754991s1-supp1-3166159.pdf",
"extension": "pdf",
"size": "147 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CpcE7ttZNC",
"doi": "10.1109/TVCG.2022.3165860",
"abstract": "Recently, deep learning based multi-view stereo (MVS) networks have demonstrated their excellent performance on various benchmarks. In this paper, we present an effective and efficient recurrent neural network (RNN) for accurate and complete dense point cloud reconstruction. Instead of regularizing the cost volume via conventional 3D CNN or unidirectional RNN like previous attempts, we adopt a bidirectional hybrid Long Short-Term Memory (LSTM) based structure for cost volume regularization. The proposed bidirectional recurrent regularization is able to perceive full-space context information comparable to 3D CNNs while saving runtime memory. For post-processing, we introduce a visibility based approach for depth map refinement to obtain more accurate dense point clouds. Extensive experiments on DTU, Tanks and Temples and ETH3D datasets demonstrate that our method outperforms previous state-of-the-art MVS methods and exhibits high memory efficiency at runtime.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recently, deep learning based multi-view stereo (MVS) networks have demonstrated their excellent performance on various benchmarks. In this paper, we present an effective and efficient recurrent neural network (RNN) for accurate and complete dense point cloud reconstruction. Instead of regularizing the cost volume via conventional 3D CNN or unidirectional RNN like previous attempts, we adopt a bidirectional hybrid Long Short-Term Memory (LSTM) based structure for cost volume regularization. The proposed bidirectional recurrent regularization is able to perceive full-space context information comparable to 3D CNNs while saving runtime memory. For post-processing, we introduce a visibility based approach for depth map refinement to obtain more accurate dense point clouds. Extensive experiments on DTU, Tanks and Temples and ETH3D datasets demonstrate that our method outperforms previous state-of-the-art MVS methods and exhibits high memory efficiency at runtime.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recently, deep learning based multi-view stereo (MVS) networks have demonstrated their excellent performance on various benchmarks. In this paper, we present an effective and efficient recurrent neural network (RNN) for accurate and complete dense point cloud reconstruction. Instead of regularizing the cost volume via conventional 3D CNN or unidirectional RNN like previous attempts, we adopt a bidirectional hybrid Long Short-Term Memory (LSTM) based structure for cost volume regularization. The proposed bidirectional recurrent regularization is able to perceive full-space context information comparable to 3D CNNs while saving runtime memory. For post-processing, we introduce a visibility based approach for depth map refinement to obtain more accurate dense point clouds. Extensive experiments on DTU, Tanks and Temples and ETH3D datasets demonstrate that our method outperforms previous state-of-the-art MVS methods and exhibits high memory efficiency at runtime.",
"title": "Bidirectional Hybrid LSTM Based Recurrent Neural Network for Multi-view Stereo",
"normalizedTitle": "Bidirectional Hybrid LSTM Based Recurrent Neural Network for Multi-view Stereo",
"fno": "09754239",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Costs",
"Feature Extraction",
"Three Dimensional Displays",
"Runtime",
"Point Cloud Compression",
"Image Reconstruction",
"Recurrent Neural Networks",
"3 D Reconstruction",
"Deep Learning",
"Multi View Stereo",
"Recurrent Neural Network",
"Point Clouds"
],
"authors": [
{
"givenName": "Zizhuang",
"surname": "Wei",
"fullName": "Zizhuang Wei",
"affiliation": "Graphics & Interaction Lab, Peking University, 12465 Beijing, Beijing, China, 100871",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingtian",
"surname": "Zhu",
"fullName": "Qingtian Zhu",
"affiliation": "Dept. of EECS, Peking University, 12465 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Min",
"fullName": "Chen Min",
"affiliation": "Dept. of EECS, Peking University, 12465 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yisong",
"surname": "Chen",
"fullName": "Yisong Chen",
"affiliation": "Dept. of EECS, Peking University, 12465 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoping",
"surname": "Wang",
"fullName": "Guoping Wang",
"affiliation": "of Computer Science, Peking University, 12465 Beijing, Beijing, China, 100871",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ictai/2016/4459/0/4459a495",
"title": "Mongolian Named Entity Recognition with Bidirectional Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2016/4459a495/12OmNBp52zt",
"parentPublication": {
"id": "proceedings/ictai/2016/4459/0",
"title": "2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2017/1051/0/1051a118",
"title": "Opinion Expression Detection via Deep Bidirectional C-GRUs",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2017/1051a118/12OmNs59JOC",
"parentPublication": {
"id": "proceedings/dexa/2017/1051/0",
"title": "2017 28th International Workshop on Database and Expert Systems Applications (DEXA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2013/4999/0/06628777",
"title": "Offline Printed Urdu Nastaleeq Script Recognition with Bidirectional LSTM Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2013/06628777/12OmNzC5T1i",
"parentPublication": {
"id": "proceedings/icdar/2013/4999/0",
"title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/04/07919264",
"title": "Video Super-Resolution via Bidirectional Recurrent Convolutional Networks",
"doi": null,
"abstractUrl": "/journal/tp/2018/04/07919264/13rRUxAAT8X",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2022/8106/0/810600a941",
"title": "Task-based Acceleration of Bidirectional Recurrent Neural Networks on Multi-core Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2022/810600a941/1F1W7FYKuHK",
"parentPublication": {
"id": "proceedings/ipdps/2022/8106/0",
"title": "2022 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a183",
"title": "Modeling Genome Data Using Bidirectional LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a183/1cYiotBhVoA",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/05/08890615",
"title": "Knowledge Base Reasoning with Convolutional-Based Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/journal/tk/2021/05/08890615/1eX8mA3pSbm",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300f520",
"title": "Recurrent MVSNet for High-Resolution Multi-View Stereo Depth Inference",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300f520/1gyrpNNO9QA",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313452",
"title": "Cross2Self-attentive Bidirectional Recurrent Neural Network with BERT for Biomedical Semantic Text Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313452/1qmg91OKLXG",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377803",
"title": "Explainable Software vulnerability detection based on Attention-based Bidirectional Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377803/1s64mZ5Lexi",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09754243",
"articleId": "1CpcDU5uTsY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09754991",
"articleId": "1CubHSuE8bm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CpcDU5uTsY",
"doi": "10.1109/TVCG.2022.3166071",
"abstract": "The development and validation of Clinical Decision Support Models (CDSM) based on Bayesian networks (BN) is commonly done in a collaborative work between medical researchers providing the domain expertise and computer scientists developing the decision support model. Although modern tools provide facilities for data-driven model generation, domain experts are required to validate the accuracy of the learned model and to provide expert knowledge for fine-tuning it while computer scientists are needed to integrate this knowledge in the learned model (hybrid modeling approach). This generally time-expensive procedure hampers CDSM generation and updating. To address this problem, we developed a novel interactive visual approach allowing medical researchers with less knowledge in CDSM to develop and validate BNs based on domain specific data mainly independently and thus, diminishing the need for an additional computer scientist. In this context, we abstracted and simplified the common workflow in BN development as well as adjusted the workflow to medical experts needs. We demonstrate our visual approach with data of endometrial cancer patients and evaluated it with six medical researchers who are domain experts in the gynecological field.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The development and validation of Clinical Decision Support Models (CDSM) based on Bayesian networks (BN) is commonly done in a collaborative work between medical researchers providing the domain expertise and computer scientists developing the decision support model. Although modern tools provide facilities for data-driven model generation, domain experts are required to validate the accuracy of the learned model and to provide expert knowledge for fine-tuning it while computer scientists are needed to integrate this knowledge in the learned model (hybrid modeling approach). This generally time-expensive procedure hampers CDSM generation and updating. To address this problem, we developed a novel interactive visual approach allowing medical researchers with less knowledge in CDSM to develop and validate BNs based on domain specific data mainly independently and thus, diminishing the need for an additional computer scientist. In this context, we abstracted and simplified the common workflow in BN development as well as adjusted the workflow to medical experts needs. We demonstrate our visual approach with data of endometrial cancer patients and evaluated it with six medical researchers who are domain experts in the gynecological field.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The development and validation of Clinical Decision Support Models (CDSM) based on Bayesian networks (BN) is commonly done in a collaborative work between medical researchers providing the domain expertise and computer scientists developing the decision support model. Although modern tools provide facilities for data-driven model generation, domain experts are required to validate the accuracy of the learned model and to provide expert knowledge for fine-tuning it while computer scientists are needed to integrate this knowledge in the learned model (hybrid modeling approach). This generally time-expensive procedure hampers CDSM generation and updating. To address this problem, we developed a novel interactive visual approach allowing medical researchers with less knowledge in CDSM to develop and validate BNs based on domain specific data mainly independently and thus, diminishing the need for an additional computer scientist. In this context, we abstracted and simplified the common workflow in BN development as well as adjusted the workflow to medical experts needs. We demonstrate our visual approach with data of endometrial cancer patients and evaluated it with six medical researchers who are domain experts in the gynecological field.",
"title": "Visual Assistance in Development and Validation of Bayesian Networks for Clinical Decision Support",
"normalizedTitle": "Visual Assistance in Development and Validation of Bayesian Networks for Clinical Decision Support",
"fno": "09754243",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Modeling",
"Visualization",
"Medical Diagnostic Imaging",
"Data Models",
"Bayes Methods",
"Tumors",
"Probability Distribution",
"Bayesian Networks",
"Visual Analysis",
"Clinical Decision Support",
"Causal Model Development"
],
"authors": [
{
"givenName": "Juliane",
"surname": "Muller-Sielaff",
"fullName": "Juliane Muller-Sielaff",
"affiliation": "Department of Neurology, Otto von Guericke Universitat Magdeburg, 9376 Magdeburg, Saxony-Anhalt, Germany, 39106",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seyed Behnam",
"surname": "Beladi",
"fullName": "Seyed Behnam Beladi",
"affiliation": "Department of Neurology, Otto von Guericke Universitat Magdeburg, 9376 Magdeburg, Sachsen-Anhalt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Monique",
"surname": "Meuschke",
"fullName": "Monique Meuschke",
"affiliation": "Department of Simulation and Graphics, Otto von Guericke Universitat Magdeburg, 9376 Magdeburg, Sachsen-Anhalt, Germany, 39106",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephanie",
"surname": "Vrede",
"fullName": "Stephanie Vrede",
"affiliation": "Department of Obstetrics & Gynecology, Radboud university medical center, Nijmegen, Gelderland, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter J.F.",
"surname": "Lucas",
"fullName": "Peter J.F. Lucas",
"affiliation": "Department of Data Science, University of Twente, 3230 Enschede, Overijssel, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johanna M.A.",
"surname": "Pijnenborg",
"fullName": "Johanna M.A. Pijnenborg",
"affiliation": "Department of Obstetrics & Gynecology, Radboud university medical center, Nijmegen, Gelderland, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steffen",
"surname": "Oeltze-Jafra",
"fullName": "Steffen Oeltze-Jafra",
"affiliation": "Dept. of Neurology, Otto von Guericke University, 9376 Magdeburg, Saxony-Anhalt, Germany, 39120",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/compsacw/2014/3578/0/3578a128",
"title": "Bayesian Model Averaging of Bayesian Network Classifiers for Intrusion Detection",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a128/12OmNvSbBGm",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acsat/2013/2758/0/2758a005",
"title": "Modified Full Bayesian Networks Classifiers for Medical Diagnosis",
"doi": null,
"abstractUrl": "/proceedings-article/acsat/2013/2758a005/12OmNwNwzGU",
"parentPublication": {
"id": "proceedings/acsat/2013/2758/0",
"title": "2013 International Conference on Advanced Computer Science Applications and Technologies (ACSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2017/2387/0/2387a302",
"title": "Establishing Verification and Validation Objectives for Safety-Critical Bayesian Networks",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2017/2387a302/12OmNzDvShe",
"parentPublication": {
"id": "proceedings/issrew/2017/2387/0",
"title": "2017 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2014/4302/0/4302b073",
"title": "Learning Sparse Gaussian Bayesian Network Structure by Variable Grouping",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2014/4302b073/12OmNzzxuxd",
"parentPublication": {
"id": "proceedings/icdm/2014/4302/0",
"title": "2014 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07364252",
"title": "Learning Discriminative Bayesian Networks from High-Dimensional Continuous Neuroimaging Data",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07364252/13rRUB6Sq1H",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2014/06/06808495",
"title": "Bayesian Networks For Evidence-Based Decision-Making in Software Engineering",
"doi": null,
"abstractUrl": "/journal/ts/2014/06/06808495/13rRUxASudc",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2007/10/k1420",
"title": "Using Ranked Nodes to Model Qualitative Judgments in Bayesian Networks",
"doi": null,
"abstractUrl": "/journal/tk/2007/10/k1420/13rRUy0HYRQ",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2018/0625/0/062500a027",
"title": "Inferring, Learning and Modelling Complex Systems with Bayesian Networks. A Tutorial",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2018/062500a027/1bhJxTrNy1O",
"parentPublication": {
"id": "proceedings/synasc/2018/0625/0",
"title": "2018 20th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a485",
"title": "Research on Intelligent Traditional Chinese Medicine Prescription Model Based on Noisy-or Bayesian Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a485/1p1gr8DDqHS",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2020/5382/0/09374327",
"title": "Causal Bayesian Networks for Medical Diagnosis: A Case Study in Rheumatoid Arthritis",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2020/09374327/1rUJ1pjOWIM",
"parentPublication": {
"id": "proceedings/ichi/2020/5382/0",
"title": "2020 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09750868",
"articleId": "1ClSREG2DeM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09754239",
"articleId": "1CpcE7ttZNC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CubGL0C34c",
"name": "ttg555501-09754243s1-supp1-3166071.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09754243s1-supp1-3166071.mp4",
"extension": "mp4",
"size": "174 MB",
"__typename": "WebExtraType"
},
{
"id": "1CubGmIcPLO",
"name": "ttg555501-09754243s1-supp2-3166071.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09754243s1-supp2-3166071.pdf",
"extension": "pdf",
"size": "1.79 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ClSREG2DeM",
"doi": "10.1109/TVCG.2022.3165385",
"abstract": "The outbreak of coronavirus disease (COVID-19) has swept across more than 180 countries and territories since late January 2020. As a worldwide emergency response, governments have implemented various measures and policies, such as self-quarantine, travel restrictions, work from home, and regional lockdown, to control the spread of the epidemic. These countermeasures seek to restrict human mobility because COVID-19 is a highly contagious disease that is spread by human-to-human transmission. Medical experts and policymakers have expressed the urgency to effectively evaluate the outcome of human restriction policies with the aid of big data and information technology. Thus, based on big human mobility data and city POI data, an interactive visual analytics system called Epidemic Mobility (EpiMob) was designed in this study. The system interactively simulates the changes in human mobility and infection status in response to the implementation of a certain restriction policy or a combination of policies (e.g., regional lockdown, telecommuting, screening). Users can conveniently designate the spatial and temporal ranges for different mobility restriction policies. Then, the results reflecting the infection situation under different policies are dynamically displayed and can be flexibly compared and analyzed in depth. Multiple case studies consisting of interviews with domain experts were conducted in the largest metropolitan area of Japan (i.e., Greater Tokyo Area) to demonstrate that the system can provide insight into the effects of different human mobility restriction policies for epidemic control, through measurements and comparisons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The outbreak of coronavirus disease (COVID-19) has swept across more than 180 countries and territories since late January 2020. As a worldwide emergency response, governments have implemented various measures and policies, such as self-quarantine, travel restrictions, work from home, and regional lockdown, to control the spread of the epidemic. These countermeasures seek to restrict human mobility because COVID-19 is a highly contagious disease that is spread by human-to-human transmission. Medical experts and policymakers have expressed the urgency to effectively evaluate the outcome of human restriction policies with the aid of big data and information technology. Thus, based on big human mobility data and city POI data, an interactive visual analytics system called Epidemic Mobility (EpiMob) was designed in this study. The system interactively simulates the changes in human mobility and infection status in response to the implementation of a certain restriction policy or a combination of policies (e.g., regional lockdown, telecommuting, screening). Users can conveniently designate the spatial and temporal ranges for different mobility restriction policies. Then, the results reflecting the infection situation under different policies are dynamically displayed and can be flexibly compared and analyzed in depth. Multiple case studies consisting of interviews with domain experts were conducted in the largest metropolitan area of Japan (i.e., Greater Tokyo Area) to demonstrate that the system can provide insight into the effects of different human mobility restriction policies for epidemic control, through measurements and comparisons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The outbreak of coronavirus disease (COVID-19) has swept across more than 180 countries and territories since late January 2020. As a worldwide emergency response, governments have implemented various measures and policies, such as self-quarantine, travel restrictions, work from home, and regional lockdown, to control the spread of the epidemic. These countermeasures seek to restrict human mobility because COVID-19 is a highly contagious disease that is spread by human-to-human transmission. Medical experts and policymakers have expressed the urgency to effectively evaluate the outcome of human restriction policies with the aid of big data and information technology. Thus, based on big human mobility data and city POI data, an interactive visual analytics system called Epidemic Mobility (EpiMob) was designed in this study. The system interactively simulates the changes in human mobility and infection status in response to the implementation of a certain restriction policy or a combination of policies (e.g., regional lockdown, telecommuting, screening). Users can conveniently designate the spatial and temporal ranges for different mobility restriction policies. Then, the results reflecting the infection situation under different policies are dynamically displayed and can be flexibly compared and analyzed in depth. Multiple case studies consisting of interviews with domain experts were conducted in the largest metropolitan area of Japan (i.e., Greater Tokyo Area) to demonstrate that the system can provide insight into the effects of different human mobility restriction policies for epidemic control, through measurements and comparisons.",
"title": "EpiMob: Interactive Visual Analytics of Citywide Human Mobility Restrictions for Epidemic Control",
"normalizedTitle": "EpiMob: Interactive Visual Analytics of Citywide Human Mobility Restrictions for Epidemic Control",
"fno": "09750868",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Epidemics",
"Urban Areas",
"COVID 19",
"Visual Analytics",
"Trajectory",
"Data Models",
"Analytical Models",
"Human Mobility Simulation",
"Epidemic Control",
"Visual Analytics",
"Interactive System",
"Big Trajectory Data"
],
"authors": [
{
"givenName": "Chuang",
"surname": "Yang",
"fullName": "Chuang Yang",
"affiliation": "Center for Spatial Information Science, The University of Tokyo, 13143 Kashiwa, Chiba, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiwen",
"surname": "Zhang",
"fullName": "Zhiwen Zhang",
"affiliation": "Center for Spatial Information Science, The University of Tokyo, 13143 Bunkyo-ku, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zipei",
"surname": "Fan",
"fullName": "Zipei Fan",
"affiliation": "Center for Spatial Information Science, The University of Tokyo, 13143 Bunkyo-ku, Chiba, Japan, 113-0033",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Renhe",
"surname": "Jiang",
"fullName": "Renhe Jiang",
"affiliation": "Information Technology Center, The University of Tokyo, 13143 Bunkyo-ku, Tokyo, Japan, 113-0033",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Quanjun",
"surname": "Chen",
"fullName": "Quanjun Chen",
"affiliation": "Center for Spatial Information Science, The University of Tokyo, 13143 Bunkyo-ku, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xuan",
"surname": "Song",
"fullName": "Xuan Song",
"affiliation": "Center for Spatial Information Science, University of Tokyo, 13143 Bunkyo-ku, Tokyo, Japan, 113-0033",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ryosuke",
"surname": "Shibasaki",
"fullName": "Ryosuke Shibasaki",
"affiliation": "Center for Spatial Information Science, The University of Tokyo, 13143 Kashiwa, Chiba, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2021/0126/0/09669328",
"title": "Multi-modal Information Fusion-powered Regional Covid-19 Epidemic Forecasting",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669328/1A9VXybnCTu",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2021/2063/0/206300a363",
"title": "Research on the mechanism of epidemic network emergencies based on grounded theory",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2021/206300a363/1E2wcOKPgIg",
"parentPublication": {
"id": "proceedings/iccsmt/2021/2063/0",
"title": "2021 2nd International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2022/08/09847322",
"title": "A Privacy-Assured Data Lifecycle for Epidemic-Handling Systems",
"doi": null,
"abstractUrl": "/magazine/co/2022/08/09847322/1FvJurM0IUw",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0/09927898",
"title": "Exploiting mobility data to forecast Covid-19 spread",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2022/09927898/1J4Cty6oXPq",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0",
"title": "2022 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2022/4609/0/460900b189",
"title": "Human Mobility Driven Modeling of an Infectious Disease",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2022/460900b189/1KBqWHalR0k",
"parentPublication": {
"id": "proceedings/icdmw/2022/4609/0",
"title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2022/4609/0/460900b181",
"title": "VISUAL ANALYTICS OF MOBILITY NETWORK CHANGES OBSERVED USING MOBILE PHONE DATA DURING COVID-19 PANDEMIC",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2022/460900b181/1KBr1oG6Q8g",
"parentPublication": {
"id": "proceedings/icdmw/2022/4609/0",
"title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icftic/2022/2195/0/10075291",
"title": "Design of Student Information Management System for Chinese University in Epidemic",
"doi": null,
"abstractUrl": "/proceedings-article/icftic/2022/10075291/1LRlbsX5r1e",
"parentPublication": {
"id": "proceedings/icftic/2022/2195/0",
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icuems/2020/8832/0/09151826",
"title": "Study on Epidemic Prevention and Control Strategy of COVID -19 Based on Personnel Flow Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/icuems/2020/09151826/1lRlPHkznna",
"parentPublication": {
"id": "proceedings/icuems/2020/8832/0",
"title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icphds/2020/8571/0/857100a086",
"title": "Features of Taiwan’s Public Health System Revealed in the Covid-19 Epidemic",
"doi": null,
"abstractUrl": "/proceedings-article/icphds/2020/857100a086/1rxhoREH7IQ",
"parentPublication": {
"id": "proceedings/icphds/2020/8571/0",
"title": "2020 International Conference on Public Health and Data Science (ICPHDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a354",
"title": "Data-driven Network Model of COVID-19 Epidemic in Italy",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a354/1stvxHw4RVu",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09747941",
"articleId": "1CdB6lneKkg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09754239",
"articleId": "1CpcE7ttZNC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CnxOTvk760",
"name": "ttg555501-09750868s1-supp1-3165385.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09750868s1-supp1-3165385.pdf",
"extension": "pdf",
"size": "1.31 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CdB6lneKkg",
"doi": "10.1109/TVCG.2022.3163727",
"abstract": "Comprehensively evaluating and comparing researchers' academic performance is complicated due to the intrinsic complexity of scholarly data. Different scholarly evaluation tasks often require the publication and citation data to be investigated in various manners. In this paper, we present an interactive visualization framework, SD^2, to enable flexible data partition and composition to support various analysis requirements within a single system. SD^2 features the hierarchical histogram, a novel visual representation for flexibly slicing and dicing the data, allowing different aspects of scholarly performance to be studied and compared. We also leverage the state-of-the-art set visualization technique to select individual researchers or combine multiple scholars for comprehensive visual comparison. We conduct multiple rounds of expert evaluation to study the effectiveness and usability of SD^2 and revise the design and system implementation accordingly. The effectiveness of SD^2 is demonstrated via multiple usage scenarios with each aiming to answer a specific, commonly raised question.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Comprehensively evaluating and comparing researchers' academic performance is complicated due to the intrinsic complexity of scholarly data. Different scholarly evaluation tasks often require the publication and citation data to be investigated in various manners. In this paper, we present an interactive visualization framework, SD^2, to enable flexible data partition and composition to support various analysis requirements within a single system. SD^2 features the hierarchical histogram, a novel visual representation for flexibly slicing and dicing the data, allowing different aspects of scholarly performance to be studied and compared. We also leverage the state-of-the-art set visualization technique to select individual researchers or combine multiple scholars for comprehensive visual comparison. We conduct multiple rounds of expert evaluation to study the effectiveness and usability of SD^2 and revise the design and system implementation accordingly. The effectiveness of SD^2 is demonstrated via multiple usage scenarios with each aiming to answer a specific, commonly raised question.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Comprehensively evaluating and comparing researchers' academic performance is complicated due to the intrinsic complexity of scholarly data. Different scholarly evaluation tasks often require the publication and citation data to be investigated in various manners. In this paper, we present an interactive visualization framework, SD^2, to enable flexible data partition and composition to support various analysis requirements within a single system. SD^2 features the hierarchical histogram, a novel visual representation for flexibly slicing and dicing the data, allowing different aspects of scholarly performance to be studied and compared. We also leverage the state-of-the-art set visualization technique to select individual researchers or combine multiple scholars for comprehensive visual comparison. We conduct multiple rounds of expert evaluation to study the effectiveness and usability of SD^2 and revise the design and system implementation accordingly. The effectiveness of SD^2 is demonstrated via multiple usage scenarios with each aiming to answer a specific, commonly raised question.",
"title": "SD^2: Slicing and Dicing Scholarly Data for Interactive Evaluation of Academic Performance",
"normalizedTitle": "SD^2: Slicing and Dicing Scholarly Data for Interactive Evaluation of Academic Performance",
"fno": "09747941",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Histograms",
"Bars",
"Task Analysis",
"Market Research",
"Collaboration",
"Visual Analytics",
"Scholarly Performance",
"Publication",
"Citation",
"Hierarchical Histogram",
"Visual Analytics"
],
"authors": [
{
"givenName": "Zhichun",
"surname": "Guo",
"fullName": "Zhichun Guo",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, 6111 Notre Dame, Indiana, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Tao",
"fullName": "Jun Tao",
"affiliation": "School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siming",
"surname": "Chen",
"fullName": "Siming Chen",
"affiliation": "School of Data Science, Fudan University, 12478 Shanghai, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nitesh",
"surname": "Chawla",
"fullName": "Nitesh Chawla",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, Indiana, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chaoli",
"surname": "Wang",
"fullName": "Chaoli Wang",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, Indiana, United States, 46556",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdew/2014/3481/0/06818305",
"title": "Scholarly big data information extraction and integration in the CiteSeerχ digital library",
"doi": null,
"abstractUrl": "/proceedings-article/icdew/2014/06818305/12OmNvEyR7r",
"parentPublication": {
"id": "proceedings/icdew/2014/3481/0",
"title": "2014 IEEE 30th International Conference on Data Engineering Workshops (ICDEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2015/9325/0/9325a288",
"title": "Interactive Analytic Systems for Understanding the Scholarly Impact of Large-Scale E-science Cyberenvironments",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a288/12OmNxiKscb",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363826",
"title": "Matisse: A visual analytics system for exploring emotion trends in social media text streams",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363826/12OmNyFCvXJ",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcat/2018/5502/0/550200a142",
"title": "GDup: De-Duplication of Scholarly Communication Big Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/bdcat/2018/550200a142/17D45W9KVId",
"parentPublication": {
"id": "proceedings/bdcat/2018/5502/0",
"title": "2018 IEEE/ACM 5th International Conference on Big Data Computing Applications and Technologies (BDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534019",
"title": "Multiple Workspaces in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534019/17D45W9KVIu",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904433",
"title": "Evaluating the Use of Uncertainty Visualisations for Imputations of Data Missing At Random in Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904433/1H1gkkbe0hy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a344",
"title": "Comparison of four visual analytics techniques for the visualization of adverse drug event rates in clinical trials",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a344/1rSRc4omAj6",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09511808",
"title": "VisInReport: Complementing Visual Discourse Analytics Through Personalized Insight Reports",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09511808/1vYRHccYKDS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552447",
"title": "VITALITY: Promoting Serendipitous Discovery of Academic Literature with Transformers & Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552447/1xic0dHxM9a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552870",
"title": "Seek for Success: A Visualization Approach for Understanding the Dynamics of Academic Careers",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552870/1xic90zZWDu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09745822",
"articleId": "1CbVo1JSjtK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09750868",
"articleId": "1ClSREG2DeM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Cizk2hvJ84",
"name": "ttg555501-09747941s1-supp1-3163727.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09747941s1-supp1-3163727.pdf",
"extension": "pdf",
"size": "139 kB",
"__typename": "WebExtraType"
},
{
"id": "1CizjUYLwFa",
"name": "ttg555501-09747941s1-supp2-3163727.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09747941s1-supp2-3163727.mp4",
"extension": "mp4",
"size": "51.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CbVo1JSjtK",
"doi": "10.1109/TVCG.2022.3163794",
"abstract": "Human path-planning operates differently from deterministic AI-based path-planning algorithms due to the decay and distortion in a human's spatial memory and the lack of complete scene knowledge. Here, we present a cognitive model of path-planning that simulates human-like learning of unfamiliar environments, supports systematic degradation in spatial memory, and distorts spatial recall during path-planning. We propose a Dynamic Hierarchical Cognitive Graph (DHCG) representation to encode the environment structure by incorporating two critical spatial memory biases during exploration: categorical adjustment and \\sequence order effect. We then extend the ‘`Fine-To-Coarse’' (FTC), the most prevalent path-planning heuristic, to incorporate spatial uncertainty during recall through the DHCG. We conducted a lab-based Virtual Reality (VR) experiment to validate the proposed cognitive path-planning model and made three observations: (1) a statistically significant impact of sequence order effect on participants' route-choices, (2) approximately three hierarchical levels in the DHCG according to participants' recall data, and (3) similar trajectories and significantly similar wayfinding performances between participants and simulated cognitive agents on identical path-planning tasks. Furthermore, we performed two detailed simulation experiments with different FTC variants on a Manhattan-style grid. Experimental results demonstrate that the proposed cognitive path-planning model successfully produces human-like paths and can capture human wayfinding's complex and dynamic nature, which traditional AI-based path-planning algorithms cannot capture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Human path-planning operates differently from deterministic AI-based path-planning algorithms due to the decay and distortion in a human's spatial memory and the lack of complete scene knowledge. Here, we present a cognitive model of path-planning that simulates human-like learning of unfamiliar environments, supports systematic degradation in spatial memory, and distorts spatial recall during path-planning. We propose a Dynamic Hierarchical Cognitive Graph (DHCG) representation to encode the environment structure by incorporating two critical spatial memory biases during exploration: categorical adjustment and \\sequence order effect. We then extend the ‘`Fine-To-Coarse’' (FTC), the most prevalent path-planning heuristic, to incorporate spatial uncertainty during recall through the DHCG. We conducted a lab-based Virtual Reality (VR) experiment to validate the proposed cognitive path-planning model and made three observations: (1) a statistically significant impact of sequence order effect on participants' route-choices, (2) approximately three hierarchical levels in the DHCG according to participants' recall data, and (3) similar trajectories and significantly similar wayfinding performances between participants and simulated cognitive agents on identical path-planning tasks. Furthermore, we performed two detailed simulation experiments with different FTC variants on a Manhattan-style grid. Experimental results demonstrate that the proposed cognitive path-planning model successfully produces human-like paths and can capture human wayfinding's complex and dynamic nature, which traditional AI-based path-planning algorithms cannot capture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Human path-planning operates differently from deterministic AI-based path-planning algorithms due to the decay and distortion in a human's spatial memory and the lack of complete scene knowledge. Here, we present a cognitive model of path-planning that simulates human-like learning of unfamiliar environments, supports systematic degradation in spatial memory, and distorts spatial recall during path-planning. We propose a Dynamic Hierarchical Cognitive Graph (DHCG) representation to encode the environment structure by incorporating two critical spatial memory biases during exploration: categorical adjustment and \\sequence order effect. We then extend the ‘`Fine-To-Coarse’' (FTC), the most prevalent path-planning heuristic, to incorporate spatial uncertainty during recall through the DHCG. We conducted a lab-based Virtual Reality (VR) experiment to validate the proposed cognitive path-planning model and made three observations: (1) a statistically significant impact of sequence order effect on participants' route-choices, (2) approximately three hierarchical levels in the DHCG according to participants' recall data, and (3) similar trajectories and significantly similar wayfinding performances between participants and simulated cognitive agents on identical path-planning tasks. Furthermore, we performed two detailed simulation experiments with different FTC variants on a Manhattan-style grid. Experimental results demonstrate that the proposed cognitive path-planning model successfully produces human-like paths and can capture human wayfinding's complex and dynamic nature, which traditional AI-based path-planning algorithms cannot capture.",
"title": "Cognitive Path Planning with Spatial Memory Distortion",
"normalizedTitle": "Cognitive Path Planning with Spatial Memory Distortion",
"fno": "09745822",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Solid Modeling",
"Computational Modeling",
"Navigation",
"Distortion",
"Games",
"Data Models",
"Task Analysis",
"Cognitive Path Planning",
"Human Wayfinding",
"Fine To Course",
"Spatial Memory",
"Agglomerative Hierarchical Clustering"
],
"authors": [
{
"givenName": "Rohit K.",
"surname": "Dubey",
"fullName": "Rohit K. Dubey",
"affiliation": "Computational Modeling and Simulation, Technical University of Munich, 9184 Munchen, Baveria, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samuel S.",
"surname": "Sohn",
"fullName": "Samuel S. Sohn",
"affiliation": "Computer Science, Rutgers University, 242612 New Brunswick, New Jersey, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tyler",
"surname": "Thrash",
"fullName": "Tyler Thrash",
"affiliation": "DGESS, ETH Zurich, 27219 Zurich, Zrich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christoph",
"surname": "Holscher",
"fullName": "Christoph Holscher",
"affiliation": "DGESS, ETH Zurich, 27219 Zurich, Zrich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mubbasir",
"surname": "Kapadia",
"fullName": "Mubbasir Kapadia",
"affiliation": "Computer Science, Rutgers University, Piscataway, New Jersey, United States, 08854",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andre",
"surname": "Borrmann",
"fullName": "Andre Borrmann",
"affiliation": "Computation Modeling and Simulation, Technical University of Munich, Munich, Baveria, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icesd/1994/5780/0/00302277",
"title": "A hybrid approach to path planning in autonomous agents",
"doi": null,
"abstractUrl": "/proceedings-article/icesd/1994/00302277/12OmNALUozj",
"parentPublication": {
"id": "proceedings/icesd/1994/5780/0",
"title": "Proceedings of International Conference on Expert Systems for Development",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a296",
"title": "A Multi-layer Approach for Interactive Path Planning Control in Virtual Reality Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a296/12OmNANTAzk",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a726",
"title": "Global Path Planning for Autonomous Mobile Robot Using Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a726/12OmNBNM92n",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457h272",
"title": "Cognitive Mapping and Planning for Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457h272/12OmNBsLPi6",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012305",
"title": "Path planning among moving obstacles using spatial indexing",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012305/12OmNvDqsF6",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2010/9247/3/05709370",
"title": "Path Planning for UAV in Radar Network Area",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2010/05709370/12OmNvRU0f9",
"parentPublication": {
"id": "proceedings/gcis/2010/9247/3",
"title": "2010 Second WRI Global Congress on Intelligent Systems (GCIS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/1994/06/x6057",
"title": "Multistrategy Adaptive Path Planning",
"doi": null,
"abstractUrl": "/magazine/ex/1994/06/x6057/13rRUyg2jPf",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeae/2021/9540/0/954000a020",
"title": "Path Planning Simulation of a quadrotor in ROS/Gazebo using RGPPM",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2021/954000a020/1GZjAuSN89q",
"parentPublication": {
"id": "proceedings/icmeae/2021/9540/0",
"title": "2021 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412869",
"title": "EEG-Based Cognitive State Assessment Using Deep Ensemble Model and Filter Bank Common Spatial Pattern",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412869/1tmhosoXTLq",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2020/9636/0/963600a946",
"title": "Path Planning Based on Improved Ant Colony Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2020/963600a946/1x4Z4G0n9Cw",
"parentPublication": {
"id": "proceedings/icvris/2020/9636/0",
"title": "2020 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09745848",
"articleId": "1CbVnSejsjK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09747941",
"articleId": "1CdB6lneKkg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CbVnSejsjK",
"doi": "10.1109/TVCG.2022.3163765",
"abstract": "The continuous growth in availability and access to data presents a major challenge to the human analyst. As the manual analysis of large and complex datasets is nowadays practically impossible, the need for assisting tools that can automate the analysis process while keeping the human analyst in the loop is imperative. A large and growing body of literature recognizes the crucial role of automation in Visual Analytics and suggests that automation is among the most important constituents for effective Visual Analytics systems. Today, however, there is no appropriate taxonomy nor terminology for assessing the extent of automation in a Visual Analytics system. In this paper, we aim to address this gap by introducing a model of levels of automation tailored for the Visual Analytics domain. The consistent terminology of the proposed taxonomy could provide a ground for users/readers/reviewers to describe and compare automation in Visual Analytics systems. Our taxonomy is grounded on a combination of several existing and well-established taxonomies of levels of automation in the human-machine interaction domain and relevant models within the visual analytics field. To exemplify the proposed taxonomy, we selected a set of existing systems from the event-sequence analytics domain and mapped the automation of their visual analytics process stages against the automation levels in our taxonomy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The continuous growth in availability and access to data presents a major challenge to the human analyst. As the manual analysis of large and complex datasets is nowadays practically impossible, the need for assisting tools that can automate the analysis process while keeping the human analyst in the loop is imperative. A large and growing body of literature recognizes the crucial role of automation in Visual Analytics and suggests that automation is among the most important constituents for effective Visual Analytics systems. Today, however, there is no appropriate taxonomy nor terminology for assessing the extent of automation in a Visual Analytics system. In this paper, we aim to address this gap by introducing a model of levels of automation tailored for the Visual Analytics domain. The consistent terminology of the proposed taxonomy could provide a ground for users/readers/reviewers to describe and compare automation in Visual Analytics systems. Our taxonomy is grounded on a combination of several existing and well-established taxonomies of levels of automation in the human-machine interaction domain and relevant models within the visual analytics field. To exemplify the proposed taxonomy, we selected a set of existing systems from the event-sequence analytics domain and mapped the automation of their visual analytics process stages against the automation levels in our taxonomy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The continuous growth in availability and access to data presents a major challenge to the human analyst. As the manual analysis of large and complex datasets is nowadays practically impossible, the need for assisting tools that can automate the analysis process while keeping the human analyst in the loop is imperative. A large and growing body of literature recognizes the crucial role of automation in Visual Analytics and suggests that automation is among the most important constituents for effective Visual Analytics systems. Today, however, there is no appropriate taxonomy nor terminology for assessing the extent of automation in a Visual Analytics system. In this paper, we aim to address this gap by introducing a model of levels of automation tailored for the Visual Analytics domain. The consistent terminology of the proposed taxonomy could provide a ground for users/readers/reviewers to describe and compare automation in Visual Analytics systems. Our taxonomy is grounded on a combination of several existing and well-established taxonomies of levels of automation in the human-machine interaction domain and relevant models within the visual analytics field. To exemplify the proposed taxonomy, we selected a set of existing systems from the event-sequence analytics domain and mapped the automation of their visual analytics process stages against the automation levels in our taxonomy.",
"title": "A Model for Types and Levels of Automation in Visual Analytics: a Survey, a Taxonomy, and Examples",
"normalizedTitle": "A Model for Types and Levels of Automation in Visual Analytics: a Survey, a Taxonomy, and Examples",
"fno": "09745848",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Automation",
"Taxonomy",
"Visual Analytics",
"Data Mining",
"Data Visualization",
"Pipelines",
"Terminology",
"Visual Analytics",
"Levels Of Automation",
"Taxonomy",
"Framework",
"Event Sequence Analytics"
],
"authors": [
{
"givenName": "Veronika",
"surname": "Domova",
"fullName": "Veronika Domova",
"affiliation": "Mechanical Engineering, Center of Design Research, Stanford University, 6429 Stanford, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katerina",
"surname": "Vrotsou",
"fullName": "Katerina Vrotsou",
"affiliation": "Department of Science and Technology, Linkping University, Norrkping, stergtland, Sweden",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bracis/2013/5092/0/5092a169",
"title": "Terminology Learning through Taxonomy Discovery",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2013/5092a169/12OmNA1DMkO",
"parentPublication": {
"id": "proceedings/bracis/2013/5092/0",
"title": "2013 Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euromicro/2005/2431/0/01517731",
"title": "A taxonomy of software component models",
"doi": null,
"abstractUrl": "/proceedings-article/euromicro/2005/01517731/12OmNqIzh1a",
"parentPublication": {
"id": "proceedings/euromicro/2005/2431/0",
"title": "EUROMICRO Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eurmic/2005/2431/0/01517731",
"title": "A taxonomy of software component models",
"doi": null,
"abstractUrl": "/proceedings-article/eurmic/2005/01517731/12OmNqOwQCq",
"parentPublication": {
"id": "proceedings/eurmic/2005/2431/0",
"title": "Proceedings. 31st Euromicro Conference on Software Engineering and Advanced Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2008/3102/0/3102a624",
"title": "A Revised Taxonomy of Data Collection Mechanisms with a Focus on Intrusion Detection",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2008/3102a624/12OmNwGIcBg",
"parentPublication": {
"id": "proceedings/ares/2008/3102/0",
"title": "2008 Third International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a368",
"title": "Tasks for Visual Analytics in Multilayer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a368/12OmNz61dsf",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06908006",
"title": "Personal Visualization and Personal Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06908006/13rRUyYBlgA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903295",
"title": "LargeNetVis: Visual Exploration of Large Temporal Networks Based on Community Taxonomies",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903295/1GZokLgYdTW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805439",
"title": "The Validity, Generalizability and Feasibility of Summative Evaluation Methods in Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805439/1cG4DVd6FcQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2019/3485/0/348500a084",
"title": "A Taxonomy of Game Elements for Gamification in Educational Contexts: Proposal and Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2019/348500a084/1cYi2xeH3cA",
"parentPublication": {
"id": "proceedings/icalt/2019/3485/2161-377X",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a368",
"title": "A Characterization of Data Exchange between Visual Analytics Tools",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a368/1rSRaA2LJBK",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09745335",
"articleId": "1CagHUR61pe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09745822",
"articleId": "1CbVo1JSjtK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CagHUR61pe",
"doi": "10.1109/TVCG.2022.3163676",
"abstract": "Synthesizing human motion with a global structure, such as a choreography, is a challenging task. Existing methods tend to concentrate on local smooth pose transitions and neglect the global context or the theme of the motion. In this work, we present a music-driven motion synthesis framework that generates long-term sequences of human motions which are synchronized with the input beats, and jointly form a global structure that respects a specific dance genre. In addition, our framework enables generation of diverse motions that are controlled by the content of the music, and not only by the beat. Our music-driven dance synthesis framework is a hierarchical system that consists of three levels: pose, motif, and choreography. The pose level consists of an LSTM component that generates temporally coherent sequences of poses. The motif level guides sets of consecutive poses to form a movement that belongs to a specific distribution using a novel motion perceptual-loss. And the choreography level selects the order of the performed movements and drives the system to follow the global structure of a dance genre. Our results demonstrate the effectiveness of our music-driven framework to generate natural and consistent movements on various dance types, having control over the content of the synthesized motions, and respecting the overall structure of the dance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Synthesizing human motion with a global structure, such as a choreography, is a challenging task. Existing methods tend to concentrate on local smooth pose transitions and neglect the global context or the theme of the motion. In this work, we present a music-driven motion synthesis framework that generates long-term sequences of human motions which are synchronized with the input beats, and jointly form a global structure that respects a specific dance genre. In addition, our framework enables generation of diverse motions that are controlled by the content of the music, and not only by the beat. Our music-driven dance synthesis framework is a hierarchical system that consists of three levels: pose, motif, and choreography. The pose level consists of an LSTM component that generates temporally coherent sequences of poses. The motif level guides sets of consecutive poses to form a movement that belongs to a specific distribution using a novel motion perceptual-loss. And the choreography level selects the order of the performed movements and drives the system to follow the global structure of a dance genre. Our results demonstrate the effectiveness of our music-driven framework to generate natural and consistent movements on various dance types, having control over the content of the synthesized motions, and respecting the overall structure of the dance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Synthesizing human motion with a global structure, such as a choreography, is a challenging task. Existing methods tend to concentrate on local smooth pose transitions and neglect the global context or the theme of the motion. In this work, we present a music-driven motion synthesis framework that generates long-term sequences of human motions which are synchronized with the input beats, and jointly form a global structure that respects a specific dance genre. In addition, our framework enables generation of diverse motions that are controlled by the content of the music, and not only by the beat. Our music-driven dance synthesis framework is a hierarchical system that consists of three levels: pose, motif, and choreography. The pose level consists of an LSTM component that generates temporally coherent sequences of poses. The motif level guides sets of consecutive poses to form a movement that belongs to a specific distribution using a novel motion perceptual-loss. And the choreography level selects the order of the performed movements and drives the system to follow the global structure of a dance genre. Our results demonstrate the effectiveness of our music-driven framework to generate natural and consistent movements on various dance types, having control over the content of the synthesized motions, and respecting the overall structure of the dance.",
"title": "Rhythm is a Dancer: Music-Driven Motion Synthesis with Global Structure",
"normalizedTitle": "Rhythm is a Dancer: Music-Driven Motion Synthesis with Global Structure",
"fno": "09745335",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Synchronization",
"Task Analysis",
"Aerospace Electronics",
"Rhythm",
"Computer Science",
"Skeleton",
"Animation",
"Global Structure Consistency",
"Motion Motifs",
"Music Driven",
"Motion Signatures"
],
"authors": [
{
"givenName": "Andreas",
"surname": "Aristidou",
"fullName": "Andreas Aristidou",
"affiliation": "Computer Science, University of Cyprus, 54557 Nicosia, Nicosia, Cyprus",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anastasios",
"surname": "Yiannakidis",
"fullName": "Anastasios Yiannakidis",
"affiliation": "Computer Science, University of Cyprus, 54557 Nicosia, Nicosia, Cyprus",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kfir",
"surname": "Aberman",
"fullName": "Kfir Aberman",
"affiliation": "Computer Science, Tel Aviv University, 26745 Tel Aviv, Tel Aviv, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Cohen-Or",
"fullName": "Daniel Cohen-Or",
"affiliation": "Computer Science, Tel Aviv University, 26745 Tel Aviv, Tel Aviv, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ariel",
"surname": "Shamir",
"fullName": "Ariel Shamir",
"affiliation": "Efi Arazi School of Computer Science, Interdisciplinary Center Herzliya, 42727 Herzliya, Tel Aviv, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiorgos",
"surname": "Chrysanthou",
"fullName": "Yiorgos Chrysanthou",
"affiliation": "Computer Science, University of Cyprus, 54557 Nicosia, Nicosia, Cyprus",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06011912",
"title": "Motion synthesis for synchronizing with streaming music by segment-based search on metadata motion graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011912/12OmNAoUTkt",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2014/4677/0/4677a071",
"title": "Feasibility Study for Contemporary Dance E-Learning: An Interactive Creation Support System Using 3D Motion Data",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2014/4677a071/12OmNBaBuQH",
"parentPublication": {
"id": "proceedings/cw/2014/4677/0",
"title": "2014 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a200",
"title": "Automatic Composition by Body-Part Motion Synthesis for Supporting Dance Creation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a200/12OmNyOq55Y",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sip/2015/9855/0/9855a005",
"title": "Classification of Dance Motions with Depth Cameras Using Subsequence Dynamic Time Warping",
"doi": null,
"abstractUrl": "/proceedings-article/sip/2015/9855a005/12OmNyo1nLK",
"parentPublication": {
"id": "proceedings/sip/2015/9855/0",
"title": "2015 8th International Conference on Signal Processing, Image Processing and Pattern Recognition (SIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2014/4677/0/4677a253",
"title": "Sketch-Based Dance Choreography",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2014/4677a253/12OmNzaQoBR",
"parentPublication": {
"id": "proceedings/cw/2014/4677/0",
"title": "2014 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030501",
"title": "Example-Based Automatic Music-Driven Conventional Dance Motion Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030501/13rRUwwaKt6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3381",
"title": "AI Choreographer: Music Conditioned 3D Dance Generation with AIST++",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3381/1BmJ1TiWSB2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859441",
"title": "DAMUS: A Collaborative System for Choreography and Music Composition",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859441/1G4EWcJ9cCk",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10018173",
"title": "Keyframe Control of Music-driven 3D Dance Generation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10018173/1JYZ6TXyjgk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a348",
"title": "Dance to Music: Generative Choreography with Music using Mixture Density Networks",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a348/1xPslGYA8Gk",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09744001",
"articleId": "1C8BFV420lq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09745848",
"articleId": "1CbVnSejsjK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CbVnyIfsOI",
"name": "ttg555501-09745335s1-supp1-3163676.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09745335s1-supp1-3163676.mp4",
"extension": "mp4",
"size": "82.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1C8BFV420lq",
"doi": "10.1109/TVCG.2022.3161130",
"abstract": "A seated user watching his avatar walking in Virtual Reality (VR) may have an impression of walking. In this paper, we show that such an impression can be extended to other postures and other locomotion exercises. We present two user studies in which participants wore a VR headset and observed a first-person avatar performing virtual exercises. In the first experiment, the avatar walked and the participants (n=36) tested the simulation in 3 different postures (standing, sitting and Fowler's posture). In the second experiment, other participants (n=18) were sitting and observed the avatar walking, jogging or stepping over virtual obstacles. We evaluated the impression of locomotion by measuring the impression of walking (respectively jogging or stepping) and embodiment in both experiments. The results show that participants had the impression of locomotion in either sitting, standing and Fowler's posture. However, Fowler's posture significantly decreased both the level of embodiment and the impression of locomotion. The sitting posture seems to decrease the sense of agency compared to standing posture. Results also show that the majority of the participants experienced an impression of locomotion during the virtual walking, jogging, and stepping exercises. The embodiment was not influenced by the type of virtual exercise. Overall, our results suggest that an impression of locomotion can be elicited in different users' postures and during different virtual locomotion exercises. They provide valuable insight for numerous VR applications in which the user observes a self-avatar moving, such as video games, gait rehabilitation, training, etc.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A seated user watching his avatar walking in Virtual Reality (VR) may have an impression of walking. In this paper, we show that such an impression can be extended to other postures and other locomotion exercises. We present two user studies in which participants wore a VR headset and observed a first-person avatar performing virtual exercises. In the first experiment, the avatar walked and the participants (n=36) tested the simulation in 3 different postures (standing, sitting and Fowler's posture). In the second experiment, other participants (n=18) were sitting and observed the avatar walking, jogging or stepping over virtual obstacles. We evaluated the impression of locomotion by measuring the impression of walking (respectively jogging or stepping) and embodiment in both experiments. The results show that participants had the impression of locomotion in either sitting, standing and Fowler's posture. However, Fowler's posture significantly decreased both the level of embodiment and the impression of locomotion. The sitting posture seems to decrease the sense of agency compared to standing posture. Results also show that the majority of the participants experienced an impression of locomotion during the virtual walking, jogging, and stepping exercises. The embodiment was not influenced by the type of virtual exercise. Overall, our results suggest that an impression of locomotion can be elicited in different users' postures and during different virtual locomotion exercises. They provide valuable insight for numerous VR applications in which the user observes a self-avatar moving, such as video games, gait rehabilitation, training, etc.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A seated user watching his avatar walking in Virtual Reality (VR) may have an impression of walking. In this paper, we show that such an impression can be extended to other postures and other locomotion exercises. We present two user studies in which participants wore a VR headset and observed a first-person avatar performing virtual exercises. In the first experiment, the avatar walked and the participants (n=36) tested the simulation in 3 different postures (standing, sitting and Fowler's posture). In the second experiment, other participants (n=18) were sitting and observed the avatar walking, jogging or stepping over virtual obstacles. We evaluated the impression of locomotion by measuring the impression of walking (respectively jogging or stepping) and embodiment in both experiments. The results show that participants had the impression of locomotion in either sitting, standing and Fowler's posture. However, Fowler's posture significantly decreased both the level of embodiment and the impression of locomotion. The sitting posture seems to decrease the sense of agency compared to standing posture. Results also show that the majority of the participants experienced an impression of locomotion during the virtual walking, jogging, and stepping exercises. The embodiment was not influenced by the type of virtual exercise. Overall, our results suggest that an impression of locomotion can be elicited in different users' postures and during different virtual locomotion exercises. They provide valuable insight for numerous VR applications in which the user observes a self-avatar moving, such as video games, gait rehabilitation, training, etc.",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"normalizedTitle": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"fno": "09744001",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Avatars",
"Cybersickness",
"Rubber",
"Headphones",
"Games",
"Visualization",
"Action Observation",
"Locomotion",
"Posture",
"Virtual Exercise",
"Embodiment"
],
"authors": [
{
"givenName": "Justine",
"surname": "Saint-Aubert",
"fullName": "Justine Saint-Aubert",
"affiliation": "Hybrid team, Inria Centre de Recherche Rennes Bretagne Atlantique, 84224 Rennes, Brittany, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Melanie",
"surname": "Cogne",
"fullName": "Melanie Cogne",
"affiliation": "Physical and Rehabilitation Medicine, University Hospital Centre Rennes, 36684 Rennes, Bretagne, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Isabelle",
"surname": "Bonan",
"fullName": "Isabelle Bonan",
"affiliation": "Physical and Rehabilitation Medicine, University Hospital Centre Rennes, 36684 Rennes, Bretagne, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yoann",
"surname": "Launey",
"fullName": "Yoann Launey",
"affiliation": "Critical care unit, University Hospital Centre Rennes, 36684 Rennes, Bretagne, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lecuyer",
"fullName": "Anatole Lecuyer",
"affiliation": "Hybrid team, Inria Centre de Recherche Rennes Bretagne Atlantique, 84224 Rennes, Brittany, France, 35042",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2008/2047/0/04476598",
"title": "LLCM-WIP: Low-Latency, Continuous-Motion Walking-in-Place",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476598/12OmNyQYtvN",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a873",
"title": "Study on Aesthetic Impression of Female Body Type and Posture",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a873/19m3G4aWrSM",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a696",
"title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09911682",
"title": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09911682/1HeiWQWKlTG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798070",
"title": "User-Centered Extension of a Locomotion Typology: Movement-Related Sensory Feedback and Spatial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798070/1cJ18ja0QXC",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090634",
"title": "Rhythmic proprioceptive stimulation improves embodiment in a walking avatar when added to visual stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090634/1jIxkrgIlEY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a064",
"title": "A Short Description of an Ankle-Actuated Seated VR Locomotion Interface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a064/1tnXf67lAWs",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09744472",
"articleId": "1C8BFCieD2U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09745335",
"articleId": "1CagHUR61pe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CagITh6kYE",
"name": "ttg555501-09744001s1-supp1-3161130.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09744001s1-supp1-3161130.mp4",
"extension": "mp4",
"size": "148 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1C8BFCieD2U",
"doi": "10.1109/TVCG.2022.3163349",
"abstract": "Merge trees, a type of topological descriptors, serve to identify and summarize the topological characteristics associated with scalar fields. They present a great potential for the analysis and visualization of time-varying data. First, they give compressed and topology-preserving representations of data instances. Second, their comparisons provide a basis for studying the relations among data instances, such as their distributions, clusters, outliers, and periodicities. A number of comparative measures have been developed for merge trees. However, these measures are often computationally expensive since they implicitly consider all possible correspondences between critical points of the merge trees. In this paper, we perform geometry aware comparisons of merge trees. The main idea is to decouple the computation of a comparative measure into two steps: a labeling step that generates a correspondence between the critical points of two merge trees, and a comparision step that computes distances between a pair of labeled merge trees by encoding them as matrices. We show that our approach is general, computationally efficient, and practically useful. Our general framework makes it possible to integrate geometric information of the data domain in the labeling process. At the same time, it reduces the computational complexity since not all possible correspondences have to be considered. We demonstrate via experiments that such geometry aware merge tree comparisons help to detect transitions, clusters, and periodicities of a time-varying dataset, as well as to diagnose and highlight the topological changes between adjacent data instances.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Merge trees, a type of topological descriptors, serve to identify and summarize the topological characteristics associated with scalar fields. They present a great potential for the analysis and visualization of time-varying data. First, they give compressed and topology-preserving representations of data instances. Second, their comparisons provide a basis for studying the relations among data instances, such as their distributions, clusters, outliers, and periodicities. A number of comparative measures have been developed for merge trees. However, these measures are often computationally expensive since they implicitly consider all possible correspondences between critical points of the merge trees. In this paper, we perform geometry aware comparisons of merge trees. The main idea is to decouple the computation of a comparative measure into two steps: a labeling step that generates a correspondence between the critical points of two merge trees, and a comparision step that computes distances between a pair of labeled merge trees by encoding them as matrices. We show that our approach is general, computationally efficient, and practically useful. Our general framework makes it possible to integrate geometric information of the data domain in the labeling process. At the same time, it reduces the computational complexity since not all possible correspondences have to be considered. We demonstrate via experiments that such geometry aware merge tree comparisons help to detect transitions, clusters, and periodicities of a time-varying dataset, as well as to diagnose and highlight the topological changes between adjacent data instances.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Merge trees, a type of topological descriptors, serve to identify and summarize the topological characteristics associated with scalar fields. They present a great potential for the analysis and visualization of time-varying data. First, they give compressed and topology-preserving representations of data instances. Second, their comparisons provide a basis for studying the relations among data instances, such as their distributions, clusters, outliers, and periodicities. A number of comparative measures have been developed for merge trees. However, these measures are often computationally expensive since they implicitly consider all possible correspondences between critical points of the merge trees. In this paper, we perform geometry aware comparisons of merge trees. The main idea is to decouple the computation of a comparative measure into two steps: a labeling step that generates a correspondence between the critical points of two merge trees, and a comparision step that computes distances between a pair of labeled merge trees by encoding them as matrices. We show that our approach is general, computationally efficient, and practically useful. Our general framework makes it possible to integrate geometric information of the data domain in the labeling process. At the same time, it reduces the computational complexity since not all possible correspondences have to be considered. We demonstrate via experiments that such geometry aware merge tree comparisons help to detect transitions, clusters, and periodicities of a time-varying dataset, as well as to diagnose and highlight the topological changes between adjacent data instances.",
"title": "Geometry Aware Merge Tree Comparisons for Time-Varying Data with Interleaving Distances",
"normalizedTitle": "Geometry Aware Merge Tree Comparisons for Time-Varying Data with Interleaving Distances",
"fno": "09744472",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Labeling",
"Measurement",
"Data Visualization",
"Geometry",
"Encoding",
"Data Analysis",
"Visualization",
"Merge Trees",
"Merge Tree Metrics",
"Topological Data Analysis",
"Topology In Visualization"
],
"authors": [
{
"givenName": "Lin",
"surname": "Yan",
"fullName": "Lin Yan",
"affiliation": "School of Computing, University of Utah, 7060 Salt Lake City, Utah, United States, 84112-9057",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Talha",
"surname": "Bin Masood",
"fullName": "Talha Bin Masood",
"affiliation": "Department of Science and Technology, Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Farhan",
"surname": "Rasheed",
"fullName": "Farhan Rasheed",
"affiliation": "Department of Science and Technology, Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ingrid",
"surname": "Hotz",
"fullName": "Ingrid Hotz",
"affiliation": "Department of Science and Technology, Linköping University, Norrköping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bei",
"surname": "Wang",
"fullName": "Bei Wang",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, SALT LAKE CITY, Utah, United States, 84112",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ldav/2017/0617/0/08231846",
"title": "Task-based augmented merge trees with Fibonacci heaps",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2017/08231846/12OmNzBwGrc",
"parentPublication": {
"id": "proceedings/ldav/2017/0617/0",
"title": "2017 IEEE 7th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/03/08481543",
"title": "Edit Distance between Merge Trees",
"doi": null,
"abstractUrl": "/journal/tg/2020/03/08481543/146z4GS1UPK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09912347",
"title": "Computing a Stable Distance on Merge Trees",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09912347/1HeiTQ2soFO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09920234",
"title": "Principal Geodesic Analysis of Merge Trees (and Persistence Diagrams)",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09920234/1HxSnktOqgU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/topoinvis/2022/9354/0/935400a029",
"title": "A Deformation-based Edit Distance for Merge Trees",
"doi": null,
"abstractUrl": "/proceedings-article/topoinvis/2022/935400a029/1J2XJrPDCgM",
"parentPublication": {
"id": "proceedings/topoinvis/2022/9354/0",
"title": "2022 Topological Data Analysis and Visualization (TopoInVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/topoinvis/2022/9354/0/935400a001",
"title": "Fast Merge Tree Computation via SYCL",
"doi": null,
"abstractUrl": "/proceedings-article/topoinvis/2022/935400a001/1J2XKMu23tu",
"parentPublication": {
"id": "proceedings/topoinvis/2022/9354/0",
"title": "2022 Topological Data Analysis and Visualization (TopoInVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/topoinvis/2022/9354/0/935400a113",
"title": "Subject-Specific Brain Activity Analysis in fMRI Data Using Merge Trees",
"doi": null,
"abstractUrl": "/proceedings-article/topoinvis/2022/935400a113/1J2XLcCgpVK",
"parentPublication": {
"id": "proceedings/topoinvis/2022/9354/0",
"title": "2022 Topological Data Analysis and Visualization (TopoInVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08794553",
"title": "A Structural Average of Labeled Merge Trees for Uncertainty Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08794553/1fe7uYD8R68",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09420248",
"title": "Unordered Task-Parallel Augmented Merge Tree Construction",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09420248/1tdUMuQErm0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555911",
"title": "Wasserstein Distances, Geodesics and Barycenters of Merge Trees",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555911/1xlvYjicn7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09741325",
"articleId": "1C0jdavrcC4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09744001",
"articleId": "1C8BFV420lq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1C0jdavrcC4",
"doi": "10.1109/TVCG.2022.3161962",
"abstract": "Extracting concise 3D curve skeletons with existing methods is still a serious challenge as these methods require tedious parameter adjustment to suppress the influence of shape boundary perturbations to avoid spurious branches. In this paper, we address this challenge by enhancing the capture of prominent features and using them for skeleton extraction, motivated by the observation that the shape is mainly represented by prominent features. Our method takes the medial mesh of the shape as input, which can maintain the shape topology well. We develop a series of novel measures for simplifying and contracting the medial mesh to capture prominent features and represent them concisely, by which means the influences of shape boundary perturbations on skeleton extraction are suppressed and the quantity of data needed for skeleton extraction is significantly reduced. As a result, we can robustly and concisely extract the curve skeleton based on prominent features, avoiding the trouble of tuning parameters and saving computations, as shown by experimental results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Extracting concise 3D curve skeletons with existing methods is still a serious challenge as these methods require tedious parameter adjustment to suppress the influence of shape boundary perturbations to avoid spurious branches. In this paper, we address this challenge by enhancing the capture of prominent features and using them for skeleton extraction, motivated by the observation that the shape is mainly represented by prominent features. Our method takes the medial mesh of the shape as input, which can maintain the shape topology well. We develop a series of novel measures for simplifying and contracting the medial mesh to capture prominent features and represent them concisely, by which means the influences of shape boundary perturbations on skeleton extraction are suppressed and the quantity of data needed for skeleton extraction is significantly reduced. As a result, we can robustly and concisely extract the curve skeleton based on prominent features, avoiding the trouble of tuning parameters and saving computations, as shown by experimental results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Extracting concise 3D curve skeletons with existing methods is still a serious challenge as these methods require tedious parameter adjustment to suppress the influence of shape boundary perturbations to avoid spurious branches. In this paper, we address this challenge by enhancing the capture of prominent features and using them for skeleton extraction, motivated by the observation that the shape is mainly represented by prominent features. Our method takes the medial mesh of the shape as input, which can maintain the shape topology well. We develop a series of novel measures for simplifying and contracting the medial mesh to capture prominent features and represent them concisely, by which means the influences of shape boundary perturbations on skeleton extraction are suppressed and the quantity of data needed for skeleton extraction is significantly reduced. As a result, we can robustly and concisely extract the curve skeleton based on prominent features, avoiding the trouble of tuning parameters and saving computations, as shown by experimental results.",
"title": "Robustly Extracting Concise 3D Curve Skeletons by Enhancing the Capture of Prominent Features",
"normalizedTitle": "Robustly Extracting Concise 3D Curve Skeletons by Enhancing the Capture of Prominent Features",
"fno": "09741325",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Skeleton",
"Shape",
"Feature Extraction",
"Topology",
"Perturbation Methods",
"Three Dimensional Displays",
"Surface Treatment",
"Curve Skeleton",
"Medial Surface",
"Set Cover",
"Edge Contraction"
],
"authors": [
{
"givenName": "Yiyao",
"surname": "Chu",
"fullName": "Yiyao Chu",
"affiliation": "institute of software, Chinese Academy of Sciences, 12381 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wencheng",
"surname": "Wang",
"fullName": "Wencheng Wang",
"affiliation": "Institute of Software, State Key Laboratory of Computer Science, Beijing, Beijing, China, 100190",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Li",
"fullName": "Lei Li",
"affiliation": "institute of software, Chinese Academy of Sciences, 12381, Beijing, China, 100190",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2007/1630/0/04409112",
"title": "On the Extraction of Curve Skeletons using Gradient Vector Flow",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2007/04409112/12OmNC1oT5B",
"parentPublication": {
"id": "proceedings/iccv/2007/1630/0",
"title": "2007 11th IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a371",
"title": "An Adaptive Hierarchical Approach to the Extraction of High Resolution Medial Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a371/12OmNqAU6yz",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2920/0/00202026",
"title": "Interval skeletons",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00202026/12OmNqBKTLd",
"parentPublication": {
"id": "proceedings/icpr/1992/2920/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ncm/2009/3769/0/3769b348",
"title": "The Smoothed 3D Skeleton for Animation",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b348/12OmNqI04VH",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsa/2008/3243/0/3243a452",
"title": "On the Structure of Straight Skeletons",
"doi": null,
"abstractUrl": "/proceedings-article/iccsa/2008/3243a452/12OmNrJ11DX",
"parentPublication": {
"id": "proceedings/iccsa/2008/3243/0",
"title": "2008 International Conference on Computational Sciences and Its Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/1/00905486",
"title": "Object representation and comparison inferred from its medial axis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/00905486/12OmNvkpljB",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223226",
"title": "Voronoi skeletons: theory and applications",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223226/12OmNxH9Xdu",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543279",
"title": "Straight skeletons for binary shapes",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543279/12OmNyUnEJa",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/01/07066924",
"title": "An Unified Multiscale Framework for Planar, Surface, and Curve Skeletonization",
"doi": null,
"abstractUrl": "/journal/tp/2016/01/07066924/13rRUxASuNO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c142",
"title": "SkeletonNetV2: A Dense Channel Attention Blocks for Skeleton Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c142/1yNi2zrB80U",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09737429",
"articleId": "1BQidPzNjBS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09744472",
"articleId": "1C8BFCieD2U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1C1Y0HPzFf2",
"name": "ttg555501-09741325s1-supp1-3161962.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09741325s1-supp1-3161962.pdf",
"extension": "pdf",
"size": "3.68 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BQidsAhMnS",
"doi": "10.1109/TVCG.2022.3158869",
"abstract": "Applications like physics, medicine, earth sciences, mechanical engineering, geo-engineering, bio-engineering use tensorial data. For example, tensors are used in formulating the balance equations of charge, mass, momentum, or energy as well as the constitutive relations that complement them. Some of these tensors (i.e. stiffness tensor, strain gradient, photo-elastic tensor) are of order higher than two. Currently, there are nearly no visualization techniques for such data beyond glyphs. An important reason for this is the limit of currently used tensor decomposition techniques. In this article, we propose to use the deviatoric decomposition to draw lines describing tensors of arbitrary order in three dimensions. The deviatoric decomposition splits a three-dimensional tensor of any order with any type of index symmetry into totally symmetric, traceless tensors. These tensors, called deviators, can be described by a unique set of directions (called multipoles by J. C. Maxwell) and scalars. These multipoles allow the definition of multipole lines which can be computed in a similar fashion to tensor lines and allow a line-based visualization of three-dimensional tensors of any order. We give examples for the visualization of symmetric, second-order tensor fields as well as fourth-order tensor fields. To allow an interpretation of the multipole lines, we analyze the connection between the multipoles and the eigenvectors/eigenvalues in the second-order case. For the fourth-order stiffness tensor, we prove relations between multipoles and the eigenvectors of the second-order right Cauchy-Green tensor and present different interpretations of the multipole lines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Applications like physics, medicine, earth sciences, mechanical engineering, geo-engineering, bio-engineering use tensorial data. For example, tensors are used in formulating the balance equations of charge, mass, momentum, or energy as well as the constitutive relations that complement them. Some of these tensors (i.e. stiffness tensor, strain gradient, photo-elastic tensor) are of order higher than two. Currently, there are nearly no visualization techniques for such data beyond glyphs. An important reason for this is the limit of currently used tensor decomposition techniques. In this article, we propose to use the deviatoric decomposition to draw lines describing tensors of arbitrary order in three dimensions. The deviatoric decomposition splits a three-dimensional tensor of any order with any type of index symmetry into totally symmetric, traceless tensors. These tensors, called deviators, can be described by a unique set of directions (called multipoles by J. C. Maxwell) and scalars. These multipoles allow the definition of multipole lines which can be computed in a similar fashion to tensor lines and allow a line-based visualization of three-dimensional tensors of any order. We give examples for the visualization of symmetric, second-order tensor fields as well as fourth-order tensor fields. To allow an interpretation of the multipole lines, we analyze the connection between the multipoles and the eigenvectors/eigenvalues in the second-order case. For the fourth-order stiffness tensor, we prove relations between multipoles and the eigenvectors of the second-order right Cauchy-Green tensor and present different interpretations of the multipole lines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Applications like physics, medicine, earth sciences, mechanical engineering, geo-engineering, bio-engineering use tensorial data. For example, tensors are used in formulating the balance equations of charge, mass, momentum, or energy as well as the constitutive relations that complement them. Some of these tensors (i.e. stiffness tensor, strain gradient, photo-elastic tensor) are of order higher than two. Currently, there are nearly no visualization techniques for such data beyond glyphs. An important reason for this is the limit of currently used tensor decomposition techniques. In this article, we propose to use the deviatoric decomposition to draw lines describing tensors of arbitrary order in three dimensions. The deviatoric decomposition splits a three-dimensional tensor of any order with any type of index symmetry into totally symmetric, traceless tensors. These tensors, called deviators, can be described by a unique set of directions (called multipoles by J. C. Maxwell) and scalars. These multipoles allow the definition of multipole lines which can be computed in a similar fashion to tensor lines and allow a line-based visualization of three-dimensional tensors of any order. We give examples for the visualization of symmetric, second-order tensor fields as well as fourth-order tensor fields. To allow an interpretation of the multipole lines, we analyze the connection between the multipoles and the eigenvectors/eigenvalues in the second-order case. For the fourth-order stiffness tensor, we prove relations between multipoles and the eigenvectors of the second-order right Cauchy-Green tensor and present different interpretations of the multipole lines.",
"title": "Visualizing Higher-Order 3D Tensors by Multipole Lines",
"normalizedTitle": "Visualizing Higher-Order 3D Tensors by Multipole Lines",
"fno": "09737134",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Tensors",
"Visualization",
"Stress",
"Strain",
"Indexes",
"Data Visualization",
"Anisotropic Magnetoresistance",
"Tensor Algebra",
"Higher Order Tensor",
"Line Based",
"Deviatoric Decomposition",
"Anisotropy"
],
"authors": [
{
"givenName": "Chiara",
"surname": "Hergl",
"fullName": "Chiara Hergl",
"affiliation": "Institut fr Mathematik und Informatik, Leipzig University, 9180 Leipzig, Saxony, Germany, 04109",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Nagel",
"fullName": "Thomas Nagel",
"affiliation": "Chair of Soil Mechanics and Foundation Engineering, Technische Universitt Bergakademie Freiberg, 26545 Freiberg, Sachsen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerik",
"surname": "Scheuermann",
"fullName": "Gerik Scheuermann",
"affiliation": "Computer Science Institut, University of Leipzig, Leipzig, D, Germany, 04009",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209d179",
"title": "Volume-Based Fabric Tensors through Lattice-Boltzmann Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d179/12OmNASraxC",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880313",
"title": "Topological Lines in 3D Tensor Fields",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880313/12OmNApLGKA",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660004",
"title": "HOT- Lines: Tracking Lines in Higher Order Tensor Fields",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660004/12OmNwMXnqd",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1999/0149/1/01491450",
"title": "The Tensors of Three Affine Views",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491450/12OmNwvVrIF",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498zheng",
"title": "Volume Deformation For Tensor Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498zheng/12OmNxA3YXe",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532773",
"title": "HOT-lines: tracking lines in higher order tensor fields",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532773/12OmNzWx07T",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061595",
"title": "Superquadric Glyphs for Symmetric Second-Order Tensors",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061595/13rRUxZzAhA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09905473",
"title": "Electromechanical Coupling in Electroactive Polymers – a Visual Analysis of a Third-Order Tensor Field",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09905473/1H2lfN8dsT6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933592",
"title": "Visualization of Symmetries in Fourth-Order Stiffness Tensors",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933592/1fTgHrkjNWE",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaa/2021/3730/0/373000a968",
"title": "The Criteria for Nonsingular H-tensors and its Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icaa/2021/373000a968/1zL1COqwvpm",
"parentPublication": {
"id": "proceedings/icaa/2021/3730/0",
"title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09737401",
"articleId": "1BQicsNYBbi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09737429",
"articleId": "1BQidPzNjBS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BQlFrjdV84",
"name": "ttg555501-09737134s1-supp1-3158869.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09737134s1-supp1-3158869.pdf",
"extension": "pdf",
"size": "156 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BQicsNYBbi",
"doi": "10.1109/TVCG.2022.3160389",
"abstract": "Unmanned Aerial Vehicles (UAVs) exhibit great agility but usually require an experienced pilot to operate them in certain applications such as inspection for disaster scenarios or buildings. The reduction of cognitive overload when driving this kind of aerial robot becomes a challenge and several solutions can be found in the literature. A new virtual control scheme for reducing this cognitive overload when controlling an aerial robot is proposed in this paper. The architecture is based on a novel interaction Drone Exocentric Advanced Metaphor (DrEAM) located in a Cave Automated Virtual Environment (CAVE) and a real robot containing an embedded controller based on quaternion formulation. The testing room, where real robots are evolving, is located away from the CAVE and they are connected via UDP in a ground station. The user controls manually a virtual drone through the DrEAM interaction metaphor, and the real robot imitates autonomously in real time the trajectory imposed by the user in the virtual environment. Experimental results illustrate the easy implementation and feasibility of the proposed scheme in two different scenarios. Results from these tests show that the mental effort when controlling a drone using the proposed virtual control scheme is lower than when controlling it in direct view. Moreover, the easy maneuverability and controllability of the real drone is also demonstrated in real time experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Unmanned Aerial Vehicles (UAVs) exhibit great agility but usually require an experienced pilot to operate them in certain applications such as inspection for disaster scenarios or buildings. The reduction of cognitive overload when driving this kind of aerial robot becomes a challenge and several solutions can be found in the literature. A new virtual control scheme for reducing this cognitive overload when controlling an aerial robot is proposed in this paper. The architecture is based on a novel interaction Drone Exocentric Advanced Metaphor (DrEAM) located in a Cave Automated Virtual Environment (CAVE) and a real robot containing an embedded controller based on quaternion formulation. The testing room, where real robots are evolving, is located away from the CAVE and they are connected via UDP in a ground station. The user controls manually a virtual drone through the DrEAM interaction metaphor, and the real robot imitates autonomously in real time the trajectory imposed by the user in the virtual environment. Experimental results illustrate the easy implementation and feasibility of the proposed scheme in two different scenarios. Results from these tests show that the mental effort when controlling a drone using the proposed virtual control scheme is lower than when controlling it in direct view. Moreover, the easy maneuverability and controllability of the real drone is also demonstrated in real time experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Unmanned Aerial Vehicles (UAVs) exhibit great agility but usually require an experienced pilot to operate them in certain applications such as inspection for disaster scenarios or buildings. The reduction of cognitive overload when driving this kind of aerial robot becomes a challenge and several solutions can be found in the literature. A new virtual control scheme for reducing this cognitive overload when controlling an aerial robot is proposed in this paper. The architecture is based on a novel interaction Drone Exocentric Advanced Metaphor (DrEAM) located in a Cave Automated Virtual Environment (CAVE) and a real robot containing an embedded controller based on quaternion formulation. The testing room, where real robots are evolving, is located away from the CAVE and they are connected via UDP in a ground station. The user controls manually a virtual drone through the DrEAM interaction metaphor, and the real robot imitates autonomously in real time the trajectory imposed by the user in the virtual environment. Experimental results illustrate the easy implementation and feasibility of the proposed scheme in two different scenarios. Results from these tests show that the mental effort when controlling a drone using the proposed virtual control scheme is lower than when controlling it in direct view. Moreover, the easy maneuverability and controllability of the real drone is also demonstrated in real time experiments.",
"title": "Exocentric control scheme for robot applications: An immersive virtual reality approach",
"normalizedTitle": "Exocentric control scheme for robot applications: An immersive virtual reality approach",
"fno": "09737401",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Robots",
"Drones",
"Virtual Environments",
"Visualization",
"Cameras",
"Autonomous Aerial Vehicles",
"Testing",
"Virtual Robotics",
"Virtual Reality",
"UA Vs",
"Teleoperation",
"Automatic Control",
"Robotics"
],
"authors": [
{
"givenName": "Julio",
"surname": "Betancourt",
"fullName": "Julio Betancourt",
"affiliation": "Heudiasyc laboratory, CNRS, 27051 Compiegne, Oise, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baptiste",
"surname": "Wojtkowski",
"fullName": "Baptiste Wojtkowski",
"affiliation": "Heudiasyc Laboratory UMR CNRS 7253, Universit de Technologie de Compigne, 27008 Compiegne, Oise, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pedro",
"surname": "Castillo",
"fullName": "Pedro Castillo",
"affiliation": "Heudiasyc laboratory, CNRS, 27051 Compiegne, Oise, France, 60200",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Indira",
"surname": "Thouvenin",
"fullName": "Indira Thouvenin",
"affiliation": "Heudiasyc Laboratory UMR CNRS 7253, Universit de Technologie de Compigne, 27008 Compiegne, Oise, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223466",
"title": "Nested immersion: Describing and classifying augmented virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223466/12OmNAle6GI",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3pgcic/2015/9473/0/9473a795",
"title": "Research on Web Intelligent Robot Based on Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/3pgcic/2015/9473a795/12OmNBDQbex",
"parentPublication": {
"id": "proceedings/3pgcic/2015/9473/0",
"title": "2015 10th International Conference on P2P, Parallel, Grid, Cloud and Internet Computing (3PGCIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciip/2015/0148/0/07414826",
"title": "A real-time ball trajectory follower using Robot Operating System",
"doi": null,
"abstractUrl": "/proceedings-article/iciip/2015/07414826/12OmNrJ11HQ",
"parentPublication": {
"id": "proceedings/iciip/2015/0148/0",
"title": "2015 Third International Conference on Image Information Processing (ICIIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892331",
"title": "Advertising perception with immersive virtual reality devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223421",
"title": "Flying robot manipulation system using a virtual plane",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223421/12OmNxIzWOO",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a854",
"title": "Vision Based Autonomous Orientational Control for Aerial Manipulation via On-board FPGA",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a854/12OmNxiKs6j",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2022/1332/0/09687268",
"title": "Adaptive Drone Identification and Neutralization Scheme for Real-Time Military Tactical Operations",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2022/09687268/1AtQ3LKJ8Dm",
"parentPublication": {
"id": "proceedings/icoin/2022/1332/0",
"title": "2022 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2023/6268/0/10049031",
"title": "UAVs Reformation Approach Based on Packet Loss in GPS-Denied Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2023/10049031/1KYsUWuEAp2",
"parentPublication": {
"id": "proceedings/icoin/2023/6268/0",
"title": "2023 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a167",
"title": "DroneCamo: Modifying Human-Drone Comfort via Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a167/1gysj7RryWk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090512",
"title": "Extracting and Transferring Hierarchical Knowledge to Robots using Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090512/1jIxv1GXrfa",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09736631",
"articleId": "1BN1UtLinTi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09737134",
"articleId": "1BQidsAhMnS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BQidPzNjBS",
"doi": "10.1109/TVCG.2022.3160232",
"abstract": "We present an efficient locomotion technique that can reduce cybersickness through aligning the visual and vestibular induced self-motion illusion. Our locomotion technique stimulates proprioception consistent with the visual sense by intentional head motion, which includes both the head's translational movement and yaw rotation. A locomotion event is triggered by the hand-held controller together with an intended physical head motion simultaneously. Based on our method, we further explore the connections between the level of cybersickness and the velocity of self motion through a series of experiments. We first conduct Experiment 1 to investigate the cybersickness induced by different translation velocities using our method and then conduct Experiment 2 to investigate the cybersickness induced by different angular velocities. Our user studies from these two experiments reveal a new finding on the correlation between translation/angular velocities and the level of cybersickness. The cybersickness is greatest at the lowest velocity using our method, and the statistical analysis also indicates a possible U-shaped relation between the translation/angular velocity and cybersickness degree. Finally, we conduct Experiment 3 to evaluate the performances of our method and other commonly-used locomotion approaches, i.e., joystick-based steering and teleportation. The results show that our method can significantly reduce cybersickness compared with the joystick-based steering and obtain a higher presence compared with the teleportation. These advantages demonstrate that our method can be an optional locomotion solution for immersive VR applications using commercially available HMD suites only.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an efficient locomotion technique that can reduce cybersickness through aligning the visual and vestibular induced self-motion illusion. Our locomotion technique stimulates proprioception consistent with the visual sense by intentional head motion, which includes both the head's translational movement and yaw rotation. A locomotion event is triggered by the hand-held controller together with an intended physical head motion simultaneously. Based on our method, we further explore the connections between the level of cybersickness and the velocity of self motion through a series of experiments. We first conduct Experiment 1 to investigate the cybersickness induced by different translation velocities using our method and then conduct Experiment 2 to investigate the cybersickness induced by different angular velocities. Our user studies from these two experiments reveal a new finding on the correlation between translation/angular velocities and the level of cybersickness. The cybersickness is greatest at the lowest velocity using our method, and the statistical analysis also indicates a possible U-shaped relation between the translation/angular velocity and cybersickness degree. Finally, we conduct Experiment 3 to evaluate the performances of our method and other commonly-used locomotion approaches, i.e., joystick-based steering and teleportation. The results show that our method can significantly reduce cybersickness compared with the joystick-based steering and obtain a higher presence compared with the teleportation. These advantages demonstrate that our method can be an optional locomotion solution for immersive VR applications using commercially available HMD suites only.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an efficient locomotion technique that can reduce cybersickness through aligning the visual and vestibular induced self-motion illusion. Our locomotion technique stimulates proprioception consistent with the visual sense by intentional head motion, which includes both the head's translational movement and yaw rotation. A locomotion event is triggered by the hand-held controller together with an intended physical head motion simultaneously. Based on our method, we further explore the connections between the level of cybersickness and the velocity of self motion through a series of experiments. We first conduct Experiment 1 to investigate the cybersickness induced by different translation velocities using our method and then conduct Experiment 2 to investigate the cybersickness induced by different angular velocities. Our user studies from these two experiments reveal a new finding on the correlation between translation/angular velocities and the level of cybersickness. The cybersickness is greatest at the lowest velocity using our method, and the statistical analysis also indicates a possible U-shaped relation between the translation/angular velocity and cybersickness degree. Finally, we conduct Experiment 3 to evaluate the performances of our method and other commonly-used locomotion approaches, i.e., joystick-based steering and teleportation. The results show that our method can significantly reduce cybersickness compared with the joystick-based steering and obtain a higher presence compared with the teleportation. These advantages demonstrate that our method can be an optional locomotion solution for immersive VR applications using commercially available HMD suites only.",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"normalizedTitle": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"fno": "09737429",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cybersickness",
"Legged Locomotion",
"Visualization",
"Navigation",
"Virtual Environments",
"Teleportation",
"Resists",
"Locomotion",
"Cybersickness",
"Head Motion",
"Translation",
"Rotation",
"Velocity",
"Presence"
],
"authors": [
{
"givenName": "Zehui",
"surname": "Lin",
"fullName": "Zehui Lin",
"affiliation": "of Computer Science, Peking University, 12465 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiang",
"surname": "Gu",
"fullName": "Xiang Gu",
"affiliation": "computer science and technology, Peking University, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng",
"surname": "Li",
"fullName": "Sheng Li",
"affiliation": "computer science and technology, Peking University, Beijing, Beijing, China, 100871",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiming",
"surname": "Hu",
"fullName": "Zhiming Hu",
"affiliation": "of Computer Science, Peking University, 12465 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoping",
"surname": "Wang",
"fullName": "Guoping Wang",
"affiliation": "of Computer Science, Peking University, 12465 Beijing, Beijing, China, 100871",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404596",
"title": "Dynamic Affordances in Embodied Interactive Systems: The Role of Display and Mode of Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404596/13rRUxOve9K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894041",
"title": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless ‘HyperJump’ Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894041/1GIqrCx8RCE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798164",
"title": "Reducing Cybersickness by Geometry Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798164/1cJ1e7ULbji",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a608",
"title": "Walking and Teleportation in Wide-area Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a486",
"title": "Visual Techniques to Reduce Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a486/1tnXnofrJRu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a345",
"title": "Spherical World in Miniature: Exploring the Tiny Planets Metaphor for Discrete Locomotion in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a345/1tuAuPBgHTi",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a031",
"title": "Cybersickness Prediction from Integrated HMD’s Sensors: A Multimodal Deep Fusion Approach using Eye-tracking and Head-tracking Data",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a031/1yeCV8NQEE0",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09737134",
"articleId": "1BQidsAhMnS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09741325",
"articleId": "1C0jdavrcC4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BQlG5eHGuI",
"name": "ttg555501-09737429s1-supp1-3160232.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09737429s1-supp1-3160232.mp4",
"extension": "mp4",
"size": "57 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BN1Ujkoysg",
"doi": "10.1109/TVCG.2022.3160005",
"abstract": "In this work, we present a novel method called WSDesc to learn 3D local descriptors in a weakly supervised manner for robust point cloud registration. Our work builds upon recent 3D CNN-based descriptor extractors, which leverage a voxel-based representation to parameterize local geometry of 3D points. Instead of using a predefined fixed-size local support in voxelization, we propose to learn the optimal support in a data-driven manner. To this end, we design a novel differentiable voxelization layer that can back-propagate the gradient to the support size optimization. To train the extracted descriptors, we propose a novel registration loss based on the deviation from rigidity of 3D transformations, and the loss is weakly supervised by the prior knowledge that the input point clouds have partial overlap, without requiring ground-truth alignment information. Through extensive experiments, we show that our learned descriptors yield superior performance on existing geometric registration benchmarks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we present a novel method called WSDesc to learn 3D local descriptors in a weakly supervised manner for robust point cloud registration. Our work builds upon recent 3D CNN-based descriptor extractors, which leverage a voxel-based representation to parameterize local geometry of 3D points. Instead of using a predefined fixed-size local support in voxelization, we propose to learn the optimal support in a data-driven manner. To this end, we design a novel differentiable voxelization layer that can back-propagate the gradient to the support size optimization. To train the extracted descriptors, we propose a novel registration loss based on the deviation from rigidity of 3D transformations, and the loss is weakly supervised by the prior knowledge that the input point clouds have partial overlap, without requiring ground-truth alignment information. Through extensive experiments, we show that our learned descriptors yield superior performance on existing geometric registration benchmarks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we present a novel method called WSDesc to learn 3D local descriptors in a weakly supervised manner for robust point cloud registration. Our work builds upon recent 3D CNN-based descriptor extractors, which leverage a voxel-based representation to parameterize local geometry of 3D points. Instead of using a predefined fixed-size local support in voxelization, we propose to learn the optimal support in a data-driven manner. To this end, we design a novel differentiable voxelization layer that can back-propagate the gradient to the support size optimization. To train the extracted descriptors, we propose a novel registration loss based on the deviation from rigidity of 3D transformations, and the loss is weakly supervised by the prior knowledge that the input point clouds have partial overlap, without requiring ground-truth alignment information. Through extensive experiments, we show that our learned descriptors yield superior performance on existing geometric registration benchmarks.",
"title": "WSDesc: Weakly Supervised 3D Local Descriptor Learning for Point Cloud Registration",
"normalizedTitle": "WSDesc: Weakly Supervised 3D Local Descriptor Learning for Point Cloud Registration",
"fno": "09736452",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Point Cloud Compression",
"Training",
"Geometry",
"Feature Extraction",
"Rigidity",
"Data Mining",
"Point Cloud",
"3 D Local Descriptor",
"Geometric Registration",
"Differentiable Voxelization",
"3 D CNN",
"Weak Supervision"
],
"authors": [
{
"givenName": "Lei",
"surname": "Li",
"fullName": "Lei Li",
"affiliation": "LIX, Ecole Polytechnique, 52830 Palaiseau, Essonne, France, 91128",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "School of Creative Media, City University of Hong Kong, 53025 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maks",
"surname": "Ovsjanikov",
"fullName": "Maks Ovsjanikov",
"affiliation": "Computer Science, Ecole Polytechnique, Paris, Ile de France, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2018/3788/0/08546214",
"title": "Fast Descriptor Extraction for Contextless 3D Registration Using a Fully Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546214/17D45WcjjPY",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5994",
"title": "HRegNet: A Hierarchical Network for Large-scale Outdoor LiDAR Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5994/1BmFeO4ChZC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09775606",
"title": "Learning General and Distinctive 3D Local Deep Descriptors for Point Cloud Registration",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09775606/1Dqh2yvQWBi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600o4910",
"title": "HybridCR: Weakly-Supervised 3D Point Cloud Semantic Segmentation via Hybrid Contrastive Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600o4910/1H1k2YPLPTG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1830",
"title": "Weakly Supervised Segmentation on Outdoor 4D point clouds with Temporal Matching and Spatial Graph Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1830/1H1kfGGzKtW",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10044259",
"title": "RoReg: Pairwise Point Cloud Registration with Oriented Descriptors and Local Rotations",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10044259/1KL6SJ4jOzS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/9.346E265",
"title": "Weakly-supervised Point Cloud Instance Segmentation with Geometric Priors",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/9.346E265/1KxVbnA0Z5S",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a582",
"title": "GaIA: Graphical Information Gain based Attention Network for Weakly Supervised Point Cloud Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a582/1KxVfZiYZOg",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1748",
"title": "SpinNet: Learning a General Surface Descriptor for 3D Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1748/1yeMfLYgNS8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b351",
"title": "3D Point Cloud Registration with Multi-Scale Architecture and Unsupervised Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b351/1zWEfYNkrg4",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09735308",
"articleId": "1BLn9j3xNQs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09736631",
"articleId": "1BN1UtLinTi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BQie2k2zJu",
"name": "ttg555501-09736452s1-supp1-3160005.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09736452s1-supp1-3160005.pdf",
"extension": "pdf",
"size": "3.59 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BN1UtLinTi",
"doi": "10.1109/TVCG.2022.3159799",
"abstract": "Head tracking in head-mounted displays (HMDs) enables users to explore a 360-degree virtual scene with free head movements. However, for seated use of HMDs such as users sitting on a chair or a couch, physically turning around 360-degree is not possible. Redirection techniques decouple tracked physical motion and virtual motion, allowing users to explore virtual environments with more flexibility. In seated situations with only head movements available, the difference of stimulus might cause the detection thresholds of rotation gains to differ from that of redirected walking. Therefore we present an experiment with a two-alternative forced-choice (2AFC) design to compare the thresholds for seated and standing situations. Results indicate that users are unable to discriminate rotation gains between 0.89 and 1.28, a smaller range compared to the standing condition. We further treated head amplification as an interaction technique and found that a gain of 2.5, though not a hard threshold, was near the largest gain that users consider applicable. Overall, our work aims to better understand human perception of rotation gains in seated VR and the results provide guidance for future design choices of its applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Head tracking in head-mounted displays (HMDs) enables users to explore a 360-degree virtual scene with free head movements. However, for seated use of HMDs such as users sitting on a chair or a couch, physically turning around 360-degree is not possible. Redirection techniques decouple tracked physical motion and virtual motion, allowing users to explore virtual environments with more flexibility. In seated situations with only head movements available, the difference of stimulus might cause the detection thresholds of rotation gains to differ from that of redirected walking. Therefore we present an experiment with a two-alternative forced-choice (2AFC) design to compare the thresholds for seated and standing situations. Results indicate that users are unable to discriminate rotation gains between 0.89 and 1.28, a smaller range compared to the standing condition. We further treated head amplification as an interaction technique and found that a gain of 2.5, though not a hard threshold, was near the largest gain that users consider applicable. Overall, our work aims to better understand human perception of rotation gains in seated VR and the results provide guidance for future design choices of its applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Head tracking in head-mounted displays (HMDs) enables users to explore a 360-degree virtual scene with free head movements. However, for seated use of HMDs such as users sitting on a chair or a couch, physically turning around 360-degree is not possible. Redirection techniques decouple tracked physical motion and virtual motion, allowing users to explore virtual environments with more flexibility. In seated situations with only head movements available, the difference of stimulus might cause the detection thresholds of rotation gains to differ from that of redirected walking. Therefore we present an experiment with a two-alternative forced-choice (2AFC) design to compare the thresholds for seated and standing situations. Results indicate that users are unable to discriminate rotation gains between 0.89 and 1.28, a smaller range compared to the standing condition. We further treated head amplification as an interaction technique and found that a gain of 2.5, though not a hard threshold, was near the largest gain that users consider applicable. Overall, our work aims to better understand human perception of rotation gains in seated VR and the results provide guidance for future design choices of its applications.",
"title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"normalizedTitle": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"fno": "09736631",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Task Analysis",
"Computer Science",
"Tracking",
"Visualization",
"Head Mounted Displays",
"User Experience",
"Rotation Gains",
"Amplified Head Rotation",
"Head Mounted Displays"
],
"authors": [
{
"givenName": "Chen",
"surname": "Wang",
"fullName": "Chen Wang",
"affiliation": "Computer Science and Technology, Tsinghua University, 12442 Beijing, Beijing, China, 100084",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Computer Science and Technology, Tsinghua University, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi-Zhuo",
"surname": "Zhang",
"fullName": "Yi-Zhuo Zhang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, 12442 Beijing, Beijing, China, 100084",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefanie",
"surname": "Zollmann",
"fullName": "Stefanie Zollmann",
"affiliation": "Computer Science, University of Otago, 2495 Dunedin, Otago, New Zealand, 9054",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shi-Min",
"surname": "Hu",
"fullName": "Shi-Min Hu",
"affiliation": "Computer Science and Technology, Tsinghua University, Beijing, Beijing, China, 100084",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892227",
"title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a620",
"title": "Design of Mentally and Physically Demanding Tasks as Distractors of Rotation Gains",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a620/1CJdavNhwAw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10075482",
"title": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10075482/1LAuCOR3RE4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797994",
"title": "Redirecting View Rotation in Immersive Movies with Washout Filters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797994/1cJ19tjOG2s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090456",
"title": "On the Effect of Standing and Seated Viewing of 360° Videos on Subjective Quality Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090456/1jIxyayiDp6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a071",
"title": "On Head Movements in Repeated 360° Video Quality Assessment for Standing and Seated Viewing on Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a071/1tnXBnBVgqc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a358",
"title": "Revisiting Audiovisual Rotation Gains for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a358/1tnXe22MFJm",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a486",
"title": "Visual Techniques to Reduce Cybersickness in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a486/1tnXnofrJRu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09736452",
"articleId": "1BN1Ujkoysg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09737401",
"articleId": "1BQicsNYBbi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BLn9j3xNQs",
"doi": "10.1109/TVCG.2022.3159114",
"abstract": "Feature related particle data analysis plays an important role in many scientific applications such as fluid simulations, cosmology simulations and molecular dynamics. Compared to conventional methods that use hand-crafted feature descriptors, some recent studies focus on transforming the data into a new latent space, where features are easier to be identified, compared and extracted. However, it is challenging to transform particle data into latent representations, since the convolution neural networks used in prior studies require the data presented in regular grids. In this paper, we adopt Geometric Convolution, a neural network building block designed for 3D point clouds, to create latent representations for scientific particle data. These latent representations capture both the particle positions and their physical attributes in the local neighborhood so that features can be extracted by clustering in the latent space, and tracked by applying tracking algorithms such as mean-shift. We validate the extracted features and tracking results from our approach using datasets from three applications and show that they are comparable to the methods that define hand-crafted features for each specific dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Feature related particle data analysis plays an important role in many scientific applications such as fluid simulations, cosmology simulations and molecular dynamics. Compared to conventional methods that use hand-crafted feature descriptors, some recent studies focus on transforming the data into a new latent space, where features are easier to be identified, compared and extracted. However, it is challenging to transform particle data into latent representations, since the convolution neural networks used in prior studies require the data presented in regular grids. In this paper, we adopt Geometric Convolution, a neural network building block designed for 3D point clouds, to create latent representations for scientific particle data. These latent representations capture both the particle positions and their physical attributes in the local neighborhood so that features can be extracted by clustering in the latent space, and tracked by applying tracking algorithms such as mean-shift. We validate the extracted features and tracking results from our approach using datasets from three applications and show that they are comparable to the methods that define hand-crafted features for each specific dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Feature related particle data analysis plays an important role in many scientific applications such as fluid simulations, cosmology simulations and molecular dynamics. Compared to conventional methods that use hand-crafted feature descriptors, some recent studies focus on transforming the data into a new latent space, where features are easier to be identified, compared and extracted. However, it is challenging to transform particle data into latent representations, since the convolution neural networks used in prior studies require the data presented in regular grids. In this paper, we adopt Geometric Convolution, a neural network building block designed for 3D point clouds, to create latent representations for scientific particle data. These latent representations capture both the particle positions and their physical attributes in the local neighborhood so that features can be extracted by clustering in the latent space, and tracked by applying tracking algorithms such as mean-shift. We validate the extracted features and tracking results from our approach using datasets from three applications and show that they are comparable to the methods that define hand-crafted features for each specific dataset.",
"title": "Local Latent Representation based on Geometric Convolution for Particle Data Feature Exploration",
"normalizedTitle": "Local Latent Representation based on Geometric Convolution for Particle Data Feature Exploration",
"fno": "09735308",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Feature Extraction",
"Neural Networks",
"Point Cloud Compression",
"Data Visualization",
"Convolution",
"Three Dimensional Displays",
"Kernel",
"Data Transformation",
"Particle Data",
"Feature Extraction And Tracking",
"Deep Learning"
],
"authors": [
{
"givenName": "Haoyu",
"surname": "Li",
"fullName": "Haoyu Li",
"affiliation": "Computer Science and Engineering, The Ohio State University, 2647 Columbus, Ohio, United States, 43210",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, Ohio, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2016/1437/0/1437b359",
"title": "3D Convolutional Networks-Based Mitotic Event Detection in Time-Lapse Phase Contrast Microscopy Image Sequences of Stem Cell Populations",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b359/12OmNyYm2D8",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e945",
"title": "Adaptive Graph Convolution for Point Cloud Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e945/1BmFzgP28iA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g292",
"title": "POCO: Point Convolution for Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g292/1H0KAZrauEo",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f533",
"title": "Learning a Structured Latent Space for Unsupervised Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f533/1H0KOsU2FZC",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10024001",
"title": "AGConv: Adaptive Graph Convolution on 3D Point Clouds",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10024001/1K9spf0w0Ug",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a166",
"title": "GNPM: Geometric-Aware Neural Parametric Models",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a166/1KYslWC3W7u",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0288",
"title": "Graph Attention Convolution for Point Cloud Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0288/1gyrHUpmp68",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i180",
"title": "Geometric Disentanglement for Generative Latent Shape Models",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i180/1hQqibv9Neg",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093505",
"title": "Blended Convolution and Synthesis for Efficient Discrimination of 3D Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093505/1jPbfCoY1IQ",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d148",
"title": "Cross-Domain Latent Modulation for Variational Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d148/1uqGAEg8Jaw",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09733942",
"articleId": "1BJIbG1OGqc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09736452",
"articleId": "1BN1Ujkoysg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BN1TTJERlC",
"name": "ttg555501-09735308s1-supp3-3159114.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09735308s1-supp3-3159114.mp4",
"extension": "mp4",
"size": "4.37 MB",
"__typename": "WebExtraType"
},
{
"id": "1BN1TNc24tq",
"name": "ttg555501-09735308s1-supp2-3159114.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09735308s1-supp2-3159114.mp4",
"extension": "mp4",
"size": "4.2 MB",
"__typename": "WebExtraType"
},
{
"id": "1BN1TYNSOl2",
"name": "ttg555501-09735308s1-supp4-3159114.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09735308s1-supp4-3159114.mp4",
"extension": "mp4",
"size": "4.2 MB",
"__typename": "WebExtraType"
},
{
"id": "1BN1U3YDxjW",
"name": "ttg555501-09735308s1-supp1-3159114.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09735308s1-supp1-3159114.mp4",
"extension": "mp4",
"size": "4.35 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BJIbG1OGqc",
"doi": "10.1109/TVCG.2022.3158236",
"abstract": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"title": "Scientometric Analysis of Interdisciplinary Collaboration and Gender Trends in 30 Years of IEEE VIS Publications",
"normalizedTitle": "Scientometric Analysis of Interdisciplinary Collaboration and Gender Trends in 30 Years of IEEE VIS Publications",
"fno": "09733942",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Collaboration",
"Data Visualization",
"Conferences",
"Bibliometrics",
"Market Research",
"Visual Analytics",
"Productivity",
"Scientometric",
"IEEE VIS Publications",
"Gender",
"Co Authorship",
"Collaboration",
"Interdisciplinary",
"Inter Institutional"
],
"authors": [
{
"givenName": "Ali",
"surname": "Sarvghad",
"fullName": "Ali Sarvghad",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rolando",
"surname": "Franqui-Nadal",
"fullName": "Rolando Franqui-Nadal",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rebecca",
"surname": "Reznik-Zellen",
"fullName": "Rebecca Reznik-Zellen",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ria",
"surname": "Chawla",
"fullName": "Ria Chawla",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Narges",
"surname": "Mahyar",
"fullName": "Narges Mahyar",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, 14707 Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2009/3450/0/01-05-01",
"title": "Digital Cross-Organizational and Cross-Border Collaboration: A Scientometric Study",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2009/01-05-01/12OmNy6Zs1C",
"parentPublication": {
"id": "proceedings/hicss/2009/3450/0",
"title": "2009 42nd Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770972",
"title": "Visualizing Interdisciplinary Citations to and from Information and Library Science Publications",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770972/12OmNyS6RKR",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2016/2846/0/07752307",
"title": "The scientometrics of successful women in science",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752307/12OmNyoiYVM",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/cc/2014/03/06808484",
"title": "A Scientometric Analysis of Cloud Computing Literature",
"doi": null,
"abstractUrl": "/journal/cc/2014/03/06808484/13rRUwdIOYO",
"parentPublication": {
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2017/02/mcs2017020082",
"title": "A report from VIS 2016",
"doi": null,
"abstractUrl": "/magazine/cs/2017/02/mcs2017020082/13rRUwwaKmg",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258573",
"title": "Discovering the interdisciplinary nature of big data research",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258573/17D45VsBTTI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440852",
"title": "VIS Author Profiles: Interactive Descriptions of Publication Records Combining Text and Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440852/17D45WXIkBi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903512",
"title": "Thirty-Two Years of IEEE VIS: Authors, Fields of Study and Citations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903512/1GZol4dym8U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377970",
"title": "A bibliometric network analysis of Deep Learning publications applied into legal documents",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377970/1s64Bs1mh6E",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552840",
"title": "Gender in 30 Years of IEEE Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552840/1xic2GL1FC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09733261",
"articleId": "1BENJyPkx5S",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09735308",
"articleId": "1BLn9j3xNQs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BENJyPkx5S",
"doi": "10.1109/TVCG.2022.3158609",
"abstract": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we proposed a new out-of-place resetting strategy that guides users to optimal physical locations with the most potential for free movement and a smaller amount of resetting required for their further movements. For this purpose, we calculate a heat map of the walking area according to the average walking distance using a simulation of the used RDW algorithm. Based on this heat map we identify the most suitable position for a one-step reset within a predefined searching range and use the one as the reset point. The results show that our method increases the average moving distance within one cycle of resetting. Furthermore, our resetting method can be applied to any physical area with obstacles. That means that RDW methods that were not suitable for such environments (e.g. Steer to Center) combined with our resetting can also be extended to such complex walking areas. In addition, we also present a resetting user interface to instruct users to move the nearby point, by using light spots to bring user a sense of relative displacement while the virtual scenario is still.",
"title": "One-step out-of-place resetting for redirected walking in VR",
"normalizedTitle": "One-step out-of-place resetting for redirected walking in VR",
"fno": "09733261",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Virtual Environments",
"Aerospace Electronics",
"User Interfaces",
"Tracking",
"Teleportation",
"Reinforcement Learning",
"Redirected Walking",
"Out Of Place Resetting And Two Arrows Indicator"
],
"authors": [
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Computer Science and Technology, Tsinghua University, Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chia-Hao",
"surname": "Chen",
"fullName": "Chia-Hao Chen",
"affiliation": "Computer Science and Technology, Tsinghua University, 12442 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefanie",
"surname": "Zollmann",
"fullName": "Stefanie Zollmann",
"affiliation": "Computer Science, University of Otago, 2495 Dunedin, Otago, New Zealand, 9054",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a655",
"title": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a655/1CJbHdnVzd6",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10058042",
"title": "Multi-User Redirected Walking in Separate Physical Spaces for Online VR Scenarios",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10058042/1LbFn8YmYjC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09669131",
"title": "Adaptive Optimization Algorithm for Resetting Techniques in Obstacle-Ridden Environments",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09669131/1zTg06F4VTq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09732236",
"articleId": "1BBtNDKgNFe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09733942",
"articleId": "1BJIbG1OGqc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BBtNDKgNFe",
"doi": "10.1109/TVCG.2022.3158093",
"abstract": "Scatterplots overlayed with a nonlinear model enable visual estimation of model-data fit. Although statistical fit is calculated using vertical distances, viewers' subjective fit is often based on shortest distances. Our results suggest that adding vertical lines (lollipops) supports more accurate fit estimation in the steep area of model curves (https://osf.io/fybx5/).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scatterplots overlayed with a nonlinear model enable visual estimation of model-data fit. Although statistical fit is calculated using vertical distances, viewers' subjective fit is often based on shortest distances. Our results suggest that adding vertical lines (lollipops) supports more accurate fit estimation in the steep area of model curves (https://osf.io/fybx5/).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scatterplots overlayed with a nonlinear model enable visual estimation of model-data fit. Although statistical fit is calculated using vertical distances, viewers' subjective fit is often based on shortest distances. Our results suggest that adding vertical lines (lollipops) supports more accurate fit estimation in the steep area of model curves (https://osf.io/fybx5/).",
"title": "Lollipops Help Align Visual and Statistical Fit Estimates in Scatterplots with Nonlinear Models",
"normalizedTitle": "Lollipops Help Align Visual and Statistical Fit Estimates in Scatterplots with Nonlinear Models",
"fno": "09732236",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Models",
"Estimation",
"Computational Modeling",
"Visualization",
"Data Visualization",
"Psychology",
"Task Analysis",
"Information Visualization",
"Perception And Psychophysics",
"Theory And Models"
],
"authors": [
{
"givenName": "Daniel",
"surname": "Reimann",
"fullName": "Daniel Reimann",
"affiliation": "Psychology, FernUniversität in Hagen, 58097 Hagen, NRW, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nilam",
"surname": "Ram",
"fullName": "Nilam Ram",
"affiliation": "Departments of Communication and Psychology, Stanford University, 6429 Stanford, California, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Gaschler",
"fullName": "Robert Gaschler",
"affiliation": "Psychology, FernUniversität in Hagen, 58097 Hagen, NRW, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460052",
"title": "A hybrid projection to widen the vertical field of view with large screens to improve the perception of personal space in architectural project review",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460052/12OmNC4eSwS",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904433",
"title": "Evaluating the Use of Uncertainty Visualisations for Imputations of Data Missing At Random in Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904433/1H1gkkbe0hy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805427",
"title": "Biased Average Position Estimates in Line and Bar Graphs: Underestimation, Overestimation, and Perceptual Pull",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805427/1cG4xtnomys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/2/260702a036",
"title": "A Proposal of SDN-FIT System to Evaluate Wide-Area Distributed Applications Based on Exhaustive FIT Scenario Generation",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260702a036/1cYipFcvorK",
"parentPublication": {
"id": "compsac/2019/2607/2",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a729",
"title": "Delayed Best-Fit Task Scheduling to Reduce Energy Consumption in Cloud Data Centers",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a729/1ehBFLparPq",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933670",
"title": "Disentangled Representation of Data Distributions in Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933670/1fTgGJvQB9e",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a335",
"title": "SizeNet: Weakly Supervised Learning of Visual Size and Fit in Fashion Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a335/1iTvuQ1sGnm",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09226404",
"title": "Evaluation of Sampling Methods for Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09226404/1nYqk0TjyeY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09325067",
"title": "Visual Model Fit Estimation in Scatterplots: Influence of Amount and Decentering of Noise",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09325067/1qnQCmbSzUA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a095",
"title": "Detection Thresholds with Joint Horizontal and Vertical Gains in Redirected Jumping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a095/1tuAwxIGXQI",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09729627",
"articleId": "1Bya9m82uzu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09733261",
"articleId": "1BENJyPkx5S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya8dlokw0",
"doi": "10.1109/TVCG.2022.3157061",
"abstract": "We propose a self-supervised method for partial point set registration. Although recently proposed learning-based methods demonstrate impressive registration performance on full shape observations, these methods often suffer from performance degradation when dealing with partial shapes. To bridge the performance gap between partial and full point set registration, we propose to incorporate a shape completion network to benefit the registration process. To achieve this, we introduce a learnable latent code for each pair of shapes, which can be regarded as the geometric encoding of the target shape. By doing so, our model does not require an explicit feature embedding network to learn the feature encodings. More importantly, both our shape completion and point set registration networks take the shared latent codes as input, which are optimized simultaneously with the parameters of two decoder networks in the training process. Therefore, the point set registration process can benefit from the joint optimization process of latent codes, which are enforced to represent the information of full shapes instead of partial ones. In the inference stage, we fix the network parameters and optimize the latent codes to obtain the optimal shape completion and registration results. Our proposed method is purely unsupervised and does not require ground truth supervision. Experiments on the ModelNet40 dataset demonstrate the effectiveness of our model for partial point set registration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a self-supervised method for partial point set registration. Although recently proposed learning-based methods demonstrate impressive registration performance on full shape observations, these methods often suffer from performance degradation when dealing with partial shapes. To bridge the performance gap between partial and full point set registration, we propose to incorporate a shape completion network to benefit the registration process. To achieve this, we introduce a learnable latent code for each pair of shapes, which can be regarded as the geometric encoding of the target shape. By doing so, our model does not require an explicit feature embedding network to learn the feature encodings. More importantly, both our shape completion and point set registration networks take the shared latent codes as input, which are optimized simultaneously with the parameters of two decoder networks in the training process. Therefore, the point set registration process can benefit from the joint optimization process of latent codes, which are enforced to represent the information of full shapes instead of partial ones. In the inference stage, we fix the network parameters and optimize the latent codes to obtain the optimal shape completion and registration results. Our proposed method is purely unsupervised and does not require ground truth supervision. Experiments on the ModelNet40 dataset demonstrate the effectiveness of our model for partial point set registration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a self-supervised method for partial point set registration. Although recently proposed learning-based methods demonstrate impressive registration performance on full shape observations, these methods often suffer from performance degradation when dealing with partial shapes. To bridge the performance gap between partial and full point set registration, we propose to incorporate a shape completion network to benefit the registration process. To achieve this, we introduce a learnable latent code for each pair of shapes, which can be regarded as the geometric encoding of the target shape. By doing so, our model does not require an explicit feature embedding network to learn the feature encodings. More importantly, both our shape completion and point set registration networks take the shared latent codes as input, which are optimized simultaneously with the parameters of two decoder networks in the training process. Therefore, the point set registration process can benefit from the joint optimization process of latent codes, which are enforced to represent the information of full shapes instead of partial ones. In the inference stage, we fix the network parameters and optimize the latent codes to obtain the optimal shape completion and registration results. Our proposed method is purely unsupervised and does not require ground truth supervision. Experiments on the ModelNet40 dataset demonstrate the effectiveness of our model for partial point set registration.",
"title": "Unsupervised Category-Specific Partial Point Set Registration via Joint Shape Completion and Registration",
"normalizedTitle": "Unsupervised Category-Specific Partial Point Set Registration via Joint Shape Completion and Registration",
"fno": "09729524",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Three Dimensional Displays",
"Codes",
"Optimization",
"Task Analysis",
"Point Cloud Compression",
"Training",
"Point Set Registration",
"Partial Registration",
"Unsupervised Learning",
"Shape Completion"
],
"authors": [
{
"givenName": "Xiang",
"surname": "Li",
"fullName": "Xiang Li",
"affiliation": "Tandon School of Engineering, New York University, 5894 New York, New York, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lingjing",
"surname": "Wang",
"fullName": "Lingjing Wang",
"affiliation": "Electrical Engineering, New York University - Abu Dhabi Campus, 167632 Abu Dhabi, Abu Dhabi, United Arab Emirates",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi",
"surname": "Fang",
"fullName": "Yi Fang",
"affiliation": "Electrical and Computer Engineering, New York University Tandon School of Engineering, 34242 Brooklyn, New York, United States, 11201",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tp/2023/01/09705149",
"title": "STORM: Structure-Based Overlap Matching for Partial Point Cloud Registration",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09705149/1AII6wed0Bi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f663",
"title": "DeepPRO: Deep Partial Point Cloud Registration of Objects",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f663/1BmGoTRdyCY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d112",
"title": "OMNet: Learning Overlapping Mask for Partial-to-Partial Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d112/1BmH817i3jq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f806",
"title": "3D Shape Generation and Completion through Point-Voxel Diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f806/1BmHiEgI4q4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/5555/01/09866792",
"title": "Partial Point Cloud Registration with Deep Local Feature",
"doi": null,
"abstractUrl": "/journal/ai/5555/01/09866792/1G7UlGeNaiA",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859918",
"title": "PDP-NET: Patch-Based Dual-Path Network for Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859918/1G9DLvbZp60",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09860002",
"title": "Partial-to-Partial Point Cloud Registration Based on Multi-Level Semantic-Structural Cognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09860002/1G9EKd6az6g",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f533",
"title": "Learning a Structured Latent Space for Unsupervised Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f533/1H0KOsU2FZC",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10093999",
"title": "ANISE: Assembly-based Neural Implicit Surface rEconstruction",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10093999/1M80HueHnJS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a342",
"title": "DeepBBS: Deep Best Buddies for Point Cloud Registration",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a342/1zWE3qPVGLe",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09727090",
"articleId": "1Brwons3Oa4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729540",
"articleId": "1Bya8YD1tUk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya8YD1tUk",
"doi": "10.1109/TVCG.2022.3156734",
"abstract": "The use of Virtual Reality (VR) technology to train professionals has increased over the years due to its advantages over traditional training. This paper presents a study comparing the effectiveness of a Virtual Environment (VE) and a Real Environment (RE) designed to train firefighters. To measure the effectiveness of the environments, a new method based on participants Heart Rate Variability (HRV) was used. This method was complemented with self-reports, in the form of questionnaires, of fatigue, stress, sense of presence, and cybersickness. An additional questionnaire was used to measure and compare knowledge transfer enabled by the environments. The results from HRV analysis indicated that participants were under physiological stress in both environments, albeit with less intensity on the VE. Regarding reported fatigue and stress, the results showed that none of the environments increased such variables. The results of knowledge transfer showed that the VE obtained a significant increase while the RE obtained a positive but non-significant increase (median values, VE: before 4 after 7, p = .003; RE: before 4 after 5, p =.375). Lastly, the results of presence and cybersickness suggested that participants experienced high overall presence and no cybersickness. Considering all results, the authors conclude that the VE provided effective training but that its effectiveness was lower than that of the RE.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of Virtual Reality (VR) technology to train professionals has increased over the years due to its advantages over traditional training. This paper presents a study comparing the effectiveness of a Virtual Environment (VE) and a Real Environment (RE) designed to train firefighters. To measure the effectiveness of the environments, a new method based on participants Heart Rate Variability (HRV) was used. This method was complemented with self-reports, in the form of questionnaires, of fatigue, stress, sense of presence, and cybersickness. An additional questionnaire was used to measure and compare knowledge transfer enabled by the environments. The results from HRV analysis indicated that participants were under physiological stress in both environments, albeit with less intensity on the VE. Regarding reported fatigue and stress, the results showed that none of the environments increased such variables. The results of knowledge transfer showed that the VE obtained a significant increase while the RE obtained a positive but non-significant increase (median values, VE: before 4 after 7, p = .003; RE: before 4 after 5, p =.375). Lastly, the results of presence and cybersickness suggested that participants experienced high overall presence and no cybersickness. Considering all results, the authors conclude that the VE provided effective training but that its effectiveness was lower than that of the RE.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of Virtual Reality (VR) technology to train professionals has increased over the years due to its advantages over traditional training. This paper presents a study comparing the effectiveness of a Virtual Environment (VE) and a Real Environment (RE) designed to train firefighters. To measure the effectiveness of the environments, a new method based on participants Heart Rate Variability (HRV) was used. This method was complemented with self-reports, in the form of questionnaires, of fatigue, stress, sense of presence, and cybersickness. An additional questionnaire was used to measure and compare knowledge transfer enabled by the environments. The results from HRV analysis indicated that participants were under physiological stress in both environments, albeit with less intensity on the VE. Regarding reported fatigue and stress, the results showed that none of the environments increased such variables. The results of knowledge transfer showed that the VE obtained a significant increase while the RE obtained a positive but non-significant increase (median values, VE: before 4 after 7, p = .003; RE: before 4 after 5, p =.375). Lastly, the results of presence and cybersickness suggested that participants experienced high overall presence and no cybersickness. Considering all results, the authors conclude that the VE provided effective training but that its effectiveness was lower than that of the RE.",
"title": "Using Heart Rate Variability for Comparing the Effectiveness of Virtual vs Real Training Environments for Firefighters",
"normalizedTitle": "Using Heart Rate Variability for Comparing the Effectiveness of Virtual vs Real Training Environments for Firefighters",
"fno": "09729540",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Training",
"Particle Measurements",
"Atmospheric Measurements",
"Heart Rate Variability",
"Stress",
"Physiology",
"Task Analysis",
"Computer Graphics",
"Virtual Reality",
"Professional Training",
"Biofeedback"
],
"authors": [
{
"givenName": "David",
"surname": "Narciso",
"fullName": "David Narciso",
"affiliation": "Engineering Department, Universidade de Tras-os-Montes e Alto Douro Escola de Ciencias e Tecnologia, 386363 Vila Real, Vila Real, Portugal, 5000-801",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miguel",
"surname": "Melo",
"fullName": "Miguel Melo",
"affiliation": "CSIG, Instituto de Engenharia de Sistemas e Computadores Tecnologia e Ciencia, 112047 Porto, Porto, Portugal, 4200-465",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Susana",
"surname": "Rodrigues",
"fullName": "Susana Rodrigues",
"affiliation": "CSIG, Instituto de Engenharia de Sistemas e Computadores Tecnologia e Ciencia, 112047 Porto, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joao Paulo",
"surname": "Cunha",
"fullName": "Joao Paulo Cunha",
"affiliation": "Departamento de Engenharia Eletrotécnica e de Computadores, Faculdade de Engenharia da Universidade do Porto, Porto, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jose",
"surname": "Vasconcelos-Raposo",
"fullName": "Jose Vasconcelos-Raposo",
"affiliation": "CSIG, Instituto de Engenharia de Sistemas e Computadores Tecnologia e Ciencia, 112047 Porto, Porto, Portugal",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maximino Esteves",
"surname": "Bessa",
"fullName": "Maximino Esteves Bessa",
"affiliation": "Engineering, UTAD, Vila Real, Trás-os-Montes, Portugal, 5001-801",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2010/9992/0/05711762",
"title": "Development of full-featured ECG system for visual stress induced heart rate variability (HRV) assessment",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2010/05711762/12OmNApu5BB",
"parentPublication": {
"id": "proceedings/isspit/2010/9992/0",
"title": "2010 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a289",
"title": "Heart Rate Variability and Skin Conductance Biofeedback: A Triple-Blind Randomized Controlled Study",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a289/12OmNAtK4n3",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367627",
"title": "Continuous monitoring of stress on smartphone using heart rate variability",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367627/12OmNC4wtEZ",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a176",
"title": "Workload Induces Changes in Hemodynamics, Respiratory Rate and Heart Rate Variability",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a176/12OmNCyBXi0",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892a430",
"title": "Investigating Technostress in situ: Understanding the Day and the Life of a Knowledge Worker Using Heart Rate Variability",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892a430/12OmNqGiu0R",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a362",
"title": "Measuring Emotional Arousal for Online Applications: Evaluation of Ultra-short Term Heart Rate Variability Measures",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a362/12OmNwCaCyL",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2016/6117/0/6117a032",
"title": "Timing of Coping Instruction Presentation for Real-Time Acute Stress Management: Long-Term Implications for Improved Surgical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2016/6117a032/12OmNz4SOCI",
"parentPublication": {
"id": "proceedings/ichi/2016/6117/0",
"title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b405",
"title": "Video Based Measurement of Heart Rate and Heart Rate Variability Spectrogram from Estimated Hemoglobin Information",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b405/17D45WHONop",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2022/9476/0/947600a168",
"title": "Camera-based heart rate variability and stress measurement from facial videos",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2022/947600a168/1JjymzwGjIY",
"parentPublication": {
"id": "proceedings/chase/2022/9476/0",
"title": "2022 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797748",
"title": "Developing an Accessible Evaluation Method of VR Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797748/1cJ17GWH4f6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09729550",
"articleId": "1Bya8LDahDa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729627",
"articleId": "1Bya9m82uzu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya7Ux2z2U",
"doi": "10.1109/TVCG.2022.3155440",
"abstract": "Images in visualization publications contain rich information, e.g., novel visualization designs and implicit design patterns of visualizations. A systematic collection of these images can contribute to the community in many aspects, such as literature analysis and automated tasks for visualization. In this paper, we build and make public a dataset, VisImages, which collects 12,267 images with captions from 1,397 papers in IEEE InfoVis and VAST. Built upon a comprehensive visualization taxonomy, the dataset includes 35,096 visualizations and their bounding boxes in the images. We demonstrate the usefulness of VisImages through three use cases: 1) investigating the use of visualizations in the publications with VisImages Explorer, 2) training and benchmarking models for visualization classification, and 3) localizing visualizations in the visual analytics systems automatically.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Images in visualization publications contain rich information, e.g., novel visualization designs and implicit design patterns of visualizations. A systematic collection of these images can contribute to the community in many aspects, such as literature analysis and automated tasks for visualization. In this paper, we build and make public a dataset, VisImages, which collects 12,267 images with captions from 1,397 papers in IEEE InfoVis and VAST. Built upon a comprehensive visualization taxonomy, the dataset includes 35,096 visualizations and their bounding boxes in the images. We demonstrate the usefulness of VisImages through three use cases: 1) investigating the use of visualizations in the publications with VisImages Explorer, 2) training and benchmarking models for visualization classification, and 3) localizing visualizations in the visual analytics systems automatically.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Images in visualization publications contain rich information, e.g., novel visualization designs and implicit design patterns of visualizations. A systematic collection of these images can contribute to the community in many aspects, such as literature analysis and automated tasks for visualization. In this paper, we build and make public a dataset, VisImages, which collects 12,267 images with captions from 1,397 papers in IEEE InfoVis and VAST. Built upon a comprehensive visualization taxonomy, the dataset includes 35,096 visualizations and their bounding boxes in the images. We demonstrate the usefulness of VisImages through three use cases: 1) investigating the use of visualizations in the publications with VisImages Explorer, 2) training and benchmarking models for visualization classification, and 3) localizing visualizations in the visual analytics systems automatically.",
"title": "VisImages: A Fine-Grained Expert-Annotated Visualization Dataset",
"normalizedTitle": "VisImages: A Fine-Grained Expert-Annotated Visualization Dataset",
"fno": "09729541",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization Dataset",
"Crowdsourcing",
"Literature Analysis",
"Visualization Classification",
"Visualization Detection"
],
"authors": [
{
"givenName": "Dazhen",
"surname": "Deng",
"fullName": "Dazhen Deng",
"affiliation": "The State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yihong",
"surname": "Wu",
"fullName": "Yihong Wu",
"affiliation": "The State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinhuan",
"surname": "Shu",
"fullName": "Xinhuan Shu",
"affiliation": "Computer Science and Engineering, Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiang",
"surname": "Wu",
"fullName": "Jiang Wu",
"affiliation": "The State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siwei",
"surname": "Fu",
"fullName": "Siwei Fu",
"affiliation": "Institute of Artificial Intelligent, Zhejiang Lab, 559075 Hangzhou, Zhejiang Province, China, 311121",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weiwei",
"surname": "Cui",
"fullName": "Weiwei Cui",
"affiliation": "Software Analytics, Microsoft Research Asia, 216064 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "Computer Science, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400506",
"title": "Visual analytics for network security",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400506/12OmNC3o50f",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2004/8779/0/87790167",
"title": "The InfoVis Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2004/87790167/12OmNwCsdCO",
"parentPublication": {
"id": "proceedings/ieee-infovis/2004/8779/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2016/0679/0/07543793",
"title": "Improving productivity and reducing cost through the use of visualizations for SDN management",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2016/07543793/12OmNxveNJf",
"parentPublication": {
"id": "proceedings/iscc/2016/0679/0",
"title": "2016 IEEE Symposium on Computers and Communication (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2000/0743/0/07430019",
"title": "BibRelEx: Exploring Bibliographic Databases by Visualization of Annotated Contents-Based Relations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2000/07430019/12OmNym2c3m",
"parentPublication": {
"id": "proceedings/iv/2000/0743/0",
"title": "2000 IEEE Conference on Information Visualization. An International Conference on Computer Visualization and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2017/6029/0/07920978",
"title": "A Reuse-Based Approach to Promote the Adoption of Visualizations for Network Management Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2017/07920978/12OmNzwZ6q3",
"parentPublication": {
"id": "proceedings/aina/2017/6029/0",
"title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875946",
"title": "Constructing Visual Representations: Investigating the Use of Tangible Tokens",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875946/13rRUwgQpDv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09916137",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222293",
"title": "Introducing Layers of Meaning (LoM): A Framework to Reduce Semantic Distance of Visualization In Humanistic Research",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222293/1nTrGyTQ0Pm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09727090",
"articleId": "1Brwons3Oa4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729524",
"articleId": "1Bya8dlokw0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya8LDahDa",
"doi": "10.1109/TVCG.2022.3156760",
"abstract": "In this work, we propose an interactive visual approach for the exploration and formation of structural relationships in embeddings of high-dimensional data. These structural relationships, such as item sequences, associations of items with groups, and hierarchies between groups of items, are defining properties of many real-world datasets. Nevertheless, most existing methods for the visual exploration of embeddings treat these structures as second-class citizens or do not take them into account at all. In our proposed analysis workflow, users explore enriched scatterplots of the embedding, in which relationships between items and/or groups are visually highlighted. The original high-dimensional data for single items, groups of items, or differences between connected items and groups is accessible through additional summary visualizations. We carefully tailored these summary and difference visualizations to the various data types and semantic contexts. During their exploratory analysis, users can externalize their insights by setting up additional groups and relationships between items and/or groups. We demonstrate the utility and potential impact of our approach by means of two use cases and multiple examples from various domains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we propose an interactive visual approach for the exploration and formation of structural relationships in embeddings of high-dimensional data. These structural relationships, such as item sequences, associations of items with groups, and hierarchies between groups of items, are defining properties of many real-world datasets. Nevertheless, most existing methods for the visual exploration of embeddings treat these structures as second-class citizens or do not take them into account at all. In our proposed analysis workflow, users explore enriched scatterplots of the embedding, in which relationships between items and/or groups are visually highlighted. The original high-dimensional data for single items, groups of items, or differences between connected items and groups is accessible through additional summary visualizations. We carefully tailored these summary and difference visualizations to the various data types and semantic contexts. During their exploratory analysis, users can externalize their insights by setting up additional groups and relationships between items and/or groups. We demonstrate the utility and potential impact of our approach by means of two use cases and multiple examples from various domains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we propose an interactive visual approach for the exploration and formation of structural relationships in embeddings of high-dimensional data. These structural relationships, such as item sequences, associations of items with groups, and hierarchies between groups of items, are defining properties of many real-world datasets. Nevertheless, most existing methods for the visual exploration of embeddings treat these structures as second-class citizens or do not take them into account at all. In our proposed analysis workflow, users explore enriched scatterplots of the embedding, in which relationships between items and/or groups are visually highlighted. The original high-dimensional data for single items, groups of items, or differences between connected items and groups is accessible through additional summary visualizations. We carefully tailored these summary and difference visualizations to the various data types and semantic contexts. During their exploratory analysis, users can externalize their insights by setting up additional groups and relationships between items and/or groups. We demonstrate the utility and potential impact of our approach by means of two use cases and multiple examples from various domains.",
"title": "Visual Exploration of Relationships and Structure in Low-Dimensional Embeddings",
"normalizedTitle": "Visual Exploration of Relationships and Structure in Low-Dimensional Embeddings",
"fno": "09729550",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Layout",
"Data Visualization",
"Space Exploration",
"Visual Analytics",
"Trajectory",
"Dimensionality Reduction",
"Projection",
"Visual Analytics",
"Layout Enrichment",
"Aggregation",
"Comparison"
],
"authors": [
{
"givenName": "Klaus",
"surname": "Eckelt",
"fullName": "Klaus Eckelt",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria, 4040",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Hinterreiter",
"fullName": "Andreas Hinterreiter",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria, 4040",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patrick",
"surname": "Adelberger",
"fullName": "Patrick Adelberger",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Conny",
"surname": "Walchshofer",
"fullName": "Conny Walchshofer",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vaishali",
"surname": "Dhanoa",
"fullName": "Vaishali Dhanoa",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christina",
"surname": "Humer",
"fullName": "Christina Humer",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Moritz",
"surname": "Heckmann",
"fullName": "Moritz Heckmann",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Steinparz",
"fullName": "Christian Steinparz",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Streit",
"fullName": "Marc Streit",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2008/3454/0/3454a460",
"title": "An Interactive, 3D Visual Exploration Tool for Undirected Relationships",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2008/3454a460/12OmNx5YvcI",
"parentPublication": {
"id": "proceedings/ism/2008/3454/0",
"title": "2008 Tenth IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013061034",
"title": "PIWI: Visually Exploring Graphs Based on Their Community Structure",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013061034/13rRUxd2aYZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876014",
"title": "Visual Exploration of Sparse Traffic Trajectory Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876014/13rRUxjQyvk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875994",
"title": "ConTour: Data-Driven Exploration of Multi-Relational Datasets for Drug Discovery",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875994/13rRUxlgy3J",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019864",
"title": "Visual Exploration of Semantic Relationships in Neural Word Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019864/13rRUzphDy1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08454489",
"title": "Patterns and Pace: Quantifying Diverse Exploration Behavior with Visualizations on the Web",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08454489/17D45W1Oa3s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930144",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a085",
"title": "Let's Get Personal: Exploring the Design of Personalized Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a085/1J6hbZrS4dG",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222068",
"title": "<italic>TaxThemis</italic>: Interactive Mining and Exploration of Suspicious Tax Evasion Groups",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222068/1nTqzVqnWU0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09508898",
"title": "Towards Systematic Design Considerations for Visualizing Cross-View Data Relationships",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09508898/1vQzkzRdSWk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09729564",
"articleId": "1Bya8xf11Oo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729540",
"articleId": "1Bya8YD1tUk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BzQ6uf5Rf2",
"name": "ttg555501-09729550s1-supp1-3156760.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09729550s1-supp1-3156760.mp4",
"extension": "mp4",
"size": "29.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya8xf11Oo",
"doi": "10.1109/TVCG.2022.3156949",
"abstract": "Image inpainting that completes large free-form missing regions in images is a promising yet challenging task. State-of-the-art approaches have achieved significant progress by taking advantage of generative adversarial networks (GAN). However, these approaches can suffer from generating distorted structures and blurry textures in high-resolution images (e.g.,512 512). The challenges mainly drive from (1) image content reasoning from distant contexts, and (2) fine-grained texture synthesis for a large missing region. To overcome these two challenges, we propose an enhanced GAN-based model, named Aggregated COntextual-Transformation GAN (AOT-GAN), for high-resolution image inpainting. Specifically, to enhance context reasoning, we construct the generator of AOT-GAN by stacking multiple layers of a proposed AOT block. The AOT blocks aggregate contextual transformations from various receptive fields, allowing to capture both informative distant image contexts and rich patterns of interest for context reasoning. For improving texture synthesis, we enhance the discriminator of AOT-GAN by training it with a tailored mask-prediction task. Such a training objective forces the discriminator to distinguish the detailed appearances of real and synthesized patches, and in turn facilitates the generator to synthesize clear textures. Extensive comparisons on Places2, the most challenging benchmark with 1.8 million high-resolution images of 365 complex scenes, show that our model outperforms the state-of-the-art. A user study including more than 30 subjects further validates the superiority of AOT-GAN. We further evaluate the proposed AOT-GAN in practical applications, e.g., logo removal, face editing, and object removal. Results show that our model achieves promising completions in the real world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image inpainting that completes large free-form missing regions in images is a promising yet challenging task. State-of-the-art approaches have achieved significant progress by taking advantage of generative adversarial networks (GAN). However, these approaches can suffer from generating distorted structures and blurry textures in high-resolution images (e.g.,512 512). The challenges mainly drive from (1) image content reasoning from distant contexts, and (2) fine-grained texture synthesis for a large missing region. To overcome these two challenges, we propose an enhanced GAN-based model, named Aggregated COntextual-Transformation GAN (AOT-GAN), for high-resolution image inpainting. Specifically, to enhance context reasoning, we construct the generator of AOT-GAN by stacking multiple layers of a proposed AOT block. The AOT blocks aggregate contextual transformations from various receptive fields, allowing to capture both informative distant image contexts and rich patterns of interest for context reasoning. For improving texture synthesis, we enhance the discriminator of AOT-GAN by training it with a tailored mask-prediction task. Such a training objective forces the discriminator to distinguish the detailed appearances of real and synthesized patches, and in turn facilitates the generator to synthesize clear textures. Extensive comparisons on Places2, the most challenging benchmark with 1.8 million high-resolution images of 365 complex scenes, show that our model outperforms the state-of-the-art. A user study including more than 30 subjects further validates the superiority of AOT-GAN. We further evaluate the proposed AOT-GAN in practical applications, e.g., logo removal, face editing, and object removal. Results show that our model achieves promising completions in the real world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image inpainting that completes large free-form missing regions in images is a promising yet challenging task. State-of-the-art approaches have achieved significant progress by taking advantage of generative adversarial networks (GAN). However, these approaches can suffer from generating distorted structures and blurry textures in high-resolution images (e.g.,512 512). The challenges mainly drive from (1) image content reasoning from distant contexts, and (2) fine-grained texture synthesis for a large missing region. To overcome these two challenges, we propose an enhanced GAN-based model, named Aggregated COntextual-Transformation GAN (AOT-GAN), for high-resolution image inpainting. Specifically, to enhance context reasoning, we construct the generator of AOT-GAN by stacking multiple layers of a proposed AOT block. The AOT blocks aggregate contextual transformations from various receptive fields, allowing to capture both informative distant image contexts and rich patterns of interest for context reasoning. For improving texture synthesis, we enhance the discriminator of AOT-GAN by training it with a tailored mask-prediction task. Such a training objective forces the discriminator to distinguish the detailed appearances of real and synthesized patches, and in turn facilitates the generator to synthesize clear textures. Extensive comparisons on Places2, the most challenging benchmark with 1.8 million high-resolution images of 365 complex scenes, show that our model outperforms the state-of-the-art. A user study including more than 30 subjects further validates the superiority of AOT-GAN. We further evaluate the proposed AOT-GAN in practical applications, e.g., logo removal, face editing, and object removal. Results show that our model achieves promising completions in the real world.",
"title": "Aggregated Contextual Transformations for High-Resolution Image Inpainting",
"normalizedTitle": "Aggregated Contextual Transformations for High-Resolution Image Inpainting",
"fno": "09729564",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Generators",
"Generative Adversarial Networks",
"Cognition",
"Training",
"Task Analysis",
"Filling",
"Convolution",
"Image Synthesis",
"Image Inpainting",
"Object Removal",
"Generative Adversarial Networks GAN"
],
"authors": [
{
"givenName": "Yanhong",
"surname": "Zeng",
"fullName": "Yanhong Zeng",
"affiliation": "School of Computer Science and Engineering, Sun Yat-Sen University, 26469 Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianlong",
"surname": "Fu",
"fullName": "Jianlong Fu",
"affiliation": "multimedia, Microsoft Research Asia, 216064 Beijing, Beijing, China, 100080",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongyang",
"surname": "Chao",
"fullName": "Hongyang Chao",
"affiliation": "Key Laboratory of Machine Intelligence and Advanced Computing, Ministry of Education of the People's Republic of China, 12543 Guangzhou, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baining",
"surname": "Guo",
"fullName": "Baining Guo",
"affiliation": "5F Beijing Sigma Center, Microsoft Research Asia, Beijing, Beijing, China, 100080",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/acpr/2017/3354/0/3354a588",
"title": "Image Inpainting: A Contextual Consistent and Deep Generative Adversarial Training Approach",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a588/17D45WgziOP",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbase/2021/2709/0/270900a552",
"title": "BIIR: Blind Inpainting based Image Reconstructon for Texture Defect Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icbase/2021/270900a552/1AH8lBRSKFq",
"parentPublication": {
"id": "proceedings/icbase/2021/2709/0",
"title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2022/9544/0/954400a081",
"title": "Face Inpainting Algorithm Combining Face Sketch and Gate Convolution",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2022/954400a081/1GNtpLkDiV2",
"parentPublication": {
"id": "proceedings/icnlp/2022/9544/0",
"title": "2022 4th International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1451",
"title": "RePaint: Inpainting using Denoising Diffusion Probabilistic Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1451/1H0L6YfOPhS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1411",
"title": "Dual-path Image Inpainting with Auxiliary GAN Inversion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1411/1H0LiHfmVOw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400a923",
"title": "Image Inpainting with Context Flow Network",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400a923/1MrFXR3HdXa",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102794",
"title": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102794/1kwrlxsf48o",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h505",
"title": "Contextual Residual Aggregation for Ultra High-Resolution Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h505/1m3ndO14G3K",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2020/7083/0/708300a259",
"title": "Research on Image Inpainting Based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2020/708300a259/1oCn35eA06I",
"parentPublication": {
"id": "proceedings/iccnea/2020/7083/0",
"title": "2020 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2020/8138/0/813800a137",
"title": "Research on Character Image Inpainting based on Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2020/813800a137/1p1gu7CZsqI",
"parentPublication": {
"id": "proceedings/iccst/2020/8138/0",
"title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09729524",
"articleId": "1Bya8dlokw0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729550",
"articleId": "1Bya8LDahDa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bya9m82uzu",
"doi": "10.1109/TVCG.2022.3157058",
"abstract": "We present RagRug, an open-source toolkit for situated analytics. The abilities of RagRug go beyond previous immersive analytics toolkits by focusing on specific requirements emerging when using augmented reality (AR) rather than virtual reality. RagRug combines state of the art visual encoding capabilities with a comprehensive physical-virtual model, which lets application developers systematically describe the physical objects in the real world and their role in AR. We connect AR visualization with data streams from the Internet of Things using distributed dataflow. To this aim, we use reactive programming patterns so that visualizations become context-aware, i.e., they adapt to events coming in from the environment. The resulting authoring system is low-code; it emphasises describing the physical and the virtual world and the dataflow between the elements contained therein. We describe the technical design and implementation of RagRug, and report on five example applications illustrating the toolkit's abilities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present RagRug, an open-source toolkit for situated analytics. The abilities of RagRug go beyond previous immersive analytics toolkits by focusing on specific requirements emerging when using augmented reality (AR) rather than virtual reality. RagRug combines state of the art visual encoding capabilities with a comprehensive physical-virtual model, which lets application developers systematically describe the physical objects in the real world and their role in AR. We connect AR visualization with data streams from the Internet of Things using distributed dataflow. To this aim, we use reactive programming patterns so that visualizations become context-aware, i.e., they adapt to events coming in from the environment. The resulting authoring system is low-code; it emphasises describing the physical and the virtual world and the dataflow between the elements contained therein. We describe the technical design and implementation of RagRug, and report on five example applications illustrating the toolkit's abilities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present RagRug, an open-source toolkit for situated analytics. The abilities of RagRug go beyond previous immersive analytics toolkits by focusing on specific requirements emerging when using augmented reality (AR) rather than virtual reality. RagRug combines state of the art visual encoding capabilities with a comprehensive physical-virtual model, which lets application developers systematically describe the physical objects in the real world and their role in AR. We connect AR visualization with data streams from the Internet of Things using distributed dataflow. To this aim, we use reactive programming patterns so that visualizations become context-aware, i.e., they adapt to events coming in from the environment. The resulting authoring system is low-code; it emphasises describing the physical and the virtual world and the dataflow between the elements contained therein. We describe the technical design and implementation of RagRug, and report on five example applications illustrating the toolkit's abilities.",
"title": "RagRug: A Toolkit for Situated Analytics",
"normalizedTitle": "RagRug: A Toolkit for Situated Analytics",
"fno": "09729627",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Solid Modeling",
"Encoding",
"Data Models",
"Computational Modeling",
"Three Dimensional Displays",
"Augmented Reality",
"Visualization",
"Visual Analytics",
"Immersive Analytics",
"Situated Analytics"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Fleck",
"fullName": "Philipp Fleck",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, 27253 Graz, Styria, Austria, 8010",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aimee",
"surname": "Sousa Calepso",
"fullName": "Aimee Sousa Calepso",
"affiliation": "Visus, University of Stuttgart, 9149 Stuttgart, Baden-Wrttemberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sebastian",
"surname": "Hubenschmid",
"fullName": "Sebastian Hubenschmid",
"affiliation": "Mensch Computer Interaction, University of Konstanz, 26567 Konstanz, Baden-Wrttemberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Sedlmair",
"fullName": "Michael Sedlmair",
"affiliation": "VISUS, University of Stuttgart, 9149 Stuttgart, Baden-Wrttemberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Institute for Computer Graphics and Vision, Graz University of Technology, Graz, Styria, Austria, 8010",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bdva/2015/7343/0/07314302",
"title": "Situated Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314302/12OmNx7G5Tt",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/live/2013/6265/0/06617339",
"title": "Introducing Circa: A dataflow-based language for live coding",
"doi": null,
"abstractUrl": "/proceedings-article/live/2013/06617339/12OmNzX6csN",
"parentPublication": {
"id": "proceedings/live/2013/6265/0",
"title": "2013 1st International Workshop on Live Programming (LIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223352",
"title": "Using augmented reality to support situated analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223352/12OmNzhELmY",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040547",
"title": "The Effects of Visual Realism on Search Tasks in Mixed Reality Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040547/13rRUwwaKt7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440858",
"title": "DXR: A Toolkit for Building Immersive Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440858/17D45XeKgxQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a472",
"title": "Situated Visualization of IIoT Data on the Hololens 2",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a472/1CJend8tNew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a054",
"title": "Augmented Scale Models: Presenting Multivariate Data Around Physical Scale Models in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a054/1JrQV01k1hK",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798228",
"title": "On Sharing Physical Geometric Space between Augmented and Virtual Reality Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798228/1cJ0LLaIrPq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a298",
"title": "Problems with Physical Simulation in a Virtual Lego-based Assembly Task using Unity3D Engine",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a298/1qpzCOjJ5e0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09645242",
"title": "Labeling Out-of-View Objects in Immersive Analytics to Support Situated Visual Searching",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09645242/1zc6DjegSGY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09729540",
"articleId": "1Bya8YD1tUk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09732236",
"articleId": "1BBtNDKgNFe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Brwons3Oa4",
"doi": "10.1109/TVCG.2022.3155808",
"abstract": "This work presents an innovative method for point set self-embedding, that encodes the structural information of a dense point set into its sparser version in a visual but imperceptible form. The self-embedded point set can function as the ordinary downsampled one and be visualized efficiently on mobile devices. Particularly, we can leverage the self-embedded information to fully restore the original point set for detailed analysis on remote servers. This new task is challenging, cause both the self-embedded point set and restored point set should resemble the original one. To achieve a learnable self-embedding scheme, we design a novel framework with two jointly-trained networks: one to encode the input point set into its self-embedded sparse point set and the other to leverage the embedded information for inverting the original point set back. Further, we develop a pair of up-shuffle and down-shuffle units in the two networks, and formulate loss terms to encourage the shape similarity and point distribution in the results. Extensive qualitative and quantitative results demonstrate the effectiveness of our method on both synthetic and real-scanned datasets. The source code and trained models will be publicly available at https://github.com/liruihui/Self-Embedding.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work presents an innovative method for point set self-embedding, that encodes the structural information of a dense point set into its sparser version in a visual but imperceptible form. The self-embedded point set can function as the ordinary downsampled one and be visualized efficiently on mobile devices. Particularly, we can leverage the self-embedded information to fully restore the original point set for detailed analysis on remote servers. This new task is challenging, cause both the self-embedded point set and restored point set should resemble the original one. To achieve a learnable self-embedding scheme, we design a novel framework with two jointly-trained networks: one to encode the input point set into its self-embedded sparse point set and the other to leverage the embedded information for inverting the original point set back. Further, we develop a pair of up-shuffle and down-shuffle units in the two networks, and formulate loss terms to encourage the shape similarity and point distribution in the results. Extensive qualitative and quantitative results demonstrate the effectiveness of our method on both synthetic and real-scanned datasets. The source code and trained models will be publicly available at https://github.com/liruihui/Self-Embedding.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work presents an innovative method for point set self-embedding, that encodes the structural information of a dense point set into its sparser version in a visual but imperceptible form. The self-embedded point set can function as the ordinary downsampled one and be visualized efficiently on mobile devices. Particularly, we can leverage the self-embedded information to fully restore the original point set for detailed analysis on remote servers. This new task is challenging, cause both the self-embedded point set and restored point set should resemble the original one. To achieve a learnable self-embedding scheme, we design a novel framework with two jointly-trained networks: one to encode the input point set into its self-embedded sparse point set and the other to leverage the embedded information for inverting the original point set back. Further, we develop a pair of up-shuffle and down-shuffle units in the two networks, and formulate loss terms to encourage the shape similarity and point distribution in the results. Extensive qualitative and quantitative results demonstrate the effectiveness of our method on both synthetic and real-scanned datasets. The source code and trained models will be publicly available at https://github.com/liruihui/Self-Embedding.",
"title": "Point Set Self-Embedding",
"normalizedTitle": "Point Set Self-Embedding",
"fno": "09727090",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Shape",
"Point Cloud Compression",
"Visualization",
"Image Restoration",
"Feature Extraction",
"Three Dimensional Displays",
"Point Set Self Embedding",
"Jointly Trained Networks",
"Shape Similarity",
"Point Distribution"
],
"authors": [
{
"givenName": "Ruihui",
"surname": "Li",
"fullName": "Ruihui Li",
"affiliation": "Chinese University of Hong Kong, Hong Kong and Hunan University, Changsha, Hunan 410082, China, and (email: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xianzhi",
"surname": "Li",
"fullName": "Xianzhi Li",
"affiliation": "Computer Science and Technology, Huazhong University of Science and Technology, 12443 Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tien-Tsin",
"surname": "Wong",
"fullName": "Tien-Tsin Wong",
"affiliation": "Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong, HKG, Hong Kong, HKG",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chi-Wing",
"surname": "Fu",
"fullName": "Chi-Wing Fu",
"affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong, Hong Kong, Hong Kong, SIN",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2015/6683/0/6683a094",
"title": "Non-rigid Articulated Point Set Registration for Human Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683a094/12OmNC2OSPb",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g383",
"title": "Geometry-Aware Self-Training for Unsupervised Domain Adaptation on Object Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g383/1BmG4XlYzqo",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g515",
"title": "Spatio-temporal Self-Supervised Representation Learning for 3D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g515/1BmHreVQrSg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09729524",
"title": "Unsupervised Category-Specific Partial Point Set Registration via Joint Shape Completion and Registration",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09729524/1Bya8dlokw0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09775211",
"title": "Deep Point Set Resampling via Gradient Fields",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09775211/1Dqh2PmIooM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09860015",
"title": "Self-Supervised Point Cloud Completion on Real Traffic Scenes Via Scene-Concerned Bottom-Up Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09860015/1G9DVAZg2uA",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i407",
"title": "Voxel Set Transformer: A Set-to-Set Approach to 3D Object Detection from Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i407/1H0N8QxLrgs",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a042",
"title": "Point Discriminative Learning for Data-efficient 3D Point Cloud Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a042/1KYsxw3MorC",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10106495",
"title": "Variational Relational Point Completion Network for Robust 3D Classification",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10106495/1MwAn9y4Ozu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a940",
"title": "NeeDrop: Self-supervised Shape Representation from Sparse Point Clouds using Needle Dropping",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a940/1zWEezCujxC",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09721603",
"articleId": "1BhzoNy6wWA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09729541",
"articleId": "1Bya7Ux2z2U",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BtbC34fvJS",
"name": "ttg555501-09727090s1-supp1-3155808.rar",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09727090s1-supp1-3155808.rar",
"extension": "rar",
"size": "49.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BhzoNy6wWA",
"doi": "10.1109/TVCG.2022.3153895",
"abstract": "We study hypergraph visualization via its topological simplification. We explore both vertex simplification and hyperedge simplification of hypergraphs using tools from topological data analysis. In particular, we transform a hypergraph into its graph representations known as the line graph and clique expansion. A topological simplification of such a graph representation induces a simplification of the hypergraph. In simplifying a hypergraph, we allow vertices to be combined if they belong to almost the same set of hyperedges, and hyperedges to be merged if they share almost the same set of vertices. Our proposed approaches are general, mathematically justifiable, and put vertex simplification and hyperedge simplification in a unifying framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study hypergraph visualization via its topological simplification. We explore both vertex simplification and hyperedge simplification of hypergraphs using tools from topological data analysis. In particular, we transform a hypergraph into its graph representations known as the line graph and clique expansion. A topological simplification of such a graph representation induces a simplification of the hypergraph. In simplifying a hypergraph, we allow vertices to be combined if they belong to almost the same set of hyperedges, and hyperedges to be merged if they share almost the same set of vertices. Our proposed approaches are general, mathematically justifiable, and put vertex simplification and hyperedge simplification in a unifying framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study hypergraph visualization via its topological simplification. We explore both vertex simplification and hyperedge simplification of hypergraphs using tools from topological data analysis. In particular, we transform a hypergraph into its graph representations known as the line graph and clique expansion. A topological simplification of such a graph representation induces a simplification of the hypergraph. In simplifying a hypergraph, we allow vertices to be combined if they belong to almost the same set of hyperedges, and hyperedges to be merged if they share almost the same set of vertices. Our proposed approaches are general, mathematically justifiable, and put vertex simplification and hyperedge simplification in a unifying framework.",
"title": "Topological Simplifications of Hypergraphs",
"normalizedTitle": "Topological Simplifications of Hypergraphs",
"fno": "09721603",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Data Visualization",
"Encoding",
"Bipartite Graph",
"Data Analysis",
"Clutter",
"Pipelines",
"Hypergraph Simplification",
"Hypergraph Visualization",
"Graph Simplification",
"Topological Data Analysis"
],
"authors": [
{
"givenName": "Youjia",
"surname": "Zhou",
"fullName": "Youjia Zhou",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City, Utah, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Archit",
"surname": "Rathore",
"fullName": "Archit Rathore",
"affiliation": "Computer Science, The University of Utah School of Computing, 415825 Salt Lake City, Utah, United States, 84108",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Emilie",
"surname": "Purvine",
"fullName": "Emilie Purvine",
"affiliation": "Information Modeling & Analysis, Pacific Northwest National Laboratory, Seattle, Washington, United States, 98109",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bei",
"surname": "Wang",
"fullName": "Bei Wang",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, SALT LAKE CITY, Utah, United States, 84112",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tp/2017/09/07582510",
"title": "Clustering with Hypergraphs: The Case for Large Hyperedges",
"doi": null,
"abstractUrl": "/journal/tp/2017/09/07582510/13rRUwghdau",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/09/07064739",
"title": "Visual Classification by Z_$\\ell _1$_Z -Hypergraph Modeling",
"doi": null,
"abstractUrl": "/journal/tk/2015/09/07064739/13rRUxZzAhZ",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/06/09739081",
"title": "Efficient Subhypergraph Matching Based on Hyperedge Features",
"doi": null,
"abstractUrl": "/journal/tk/2023/06/09739081/1BVBKFEH4sw",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09875064",
"title": "HyperISO: Efficiently Searching Subgraph Containment in Hypergraphs",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09875064/1GlbTSkl9Is",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2023/03/10016680",
"title": "Revisiting Core Maintenance for Dynamic Hypergraphs",
"doi": null,
"abstractUrl": "/journal/td/2023/03/10016680/1JU072eQJmU",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2022/4609/0/460900b197",
"title": "Influence Maximization on Hypergraphs via Similarity-based Diffusion",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2022/460900b197/1KBr2hkaYGk",
"parentPublication": {
"id": "proceedings/icdmw/2022/4609/0",
"title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2019/6868/0/09073582",
"title": "Random Preferential Attachment Hypergraph",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2019/09073582/1jjAhNUEqWI",
"parentPublication": {
"id": "proceedings/asonam/2019/6868/0",
"title": "2019 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/07/09187569",
"title": "Distributed Hypergraph Processing Using Intersection Graphs",
"doi": null,
"abstractUrl": "/journal/tk/2022/07/09187569/1mVFlr5j4Aw",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2020/4380/0/438000b560",
"title": "Hypergraph Attention Networks",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2020/438000b560/1r54ktrpwly",
"parentPublication": {
"id": "proceedings/trustcom/2020/4380/0",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400c051",
"title": "Hypercore Maintenance in Dynamic Hypergraphs",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400c051/1uGXh6kpZ2o",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09721695",
"articleId": "1Bhzo1K76IU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09727090",
"articleId": "1Brwons3Oa4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BmTkjphPcA",
"name": "ttg555501-09721603s1-supp1-3153895.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09721603s1-supp1-3153895.mp4",
"extension": "mp4",
"size": "24.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BhzmWFFD9K",
"doi": "10.1109/TVCG.2022.3153871",
"abstract": "The growing complexity of spatial and structural information in 3D data makes data inspection and visualization a challenging task. We describe a method to create a planar embedding of 3D treelike structures using their skeleton representations. Our method maintains the original geometry, without overlaps, to the best extent possible, allowing exploration of the topology within a single view. We present a novel camera view generation method which maximizes the visible geometric attributes (segment shape and relative placement between segments). Camera views are created for individual segments and are used to determine local bending angles at each node by projecting them to 2D. The final embedding is generated by minimizing an energy function (the weights of which are user adjustable) based on branch length and the 2D angles, while avoiding intersections. The user can also interactively modify segment placement within the 2D embedding, and the overall embedding will update accordingly. A global to local interactive exploration is provided using hierarchical camera views that are created for subtrees within the structure. We evaluate our method both qualitatively and quantitatively and demonstrate our results by constructing planar visualizations of line data (traced neurons) and volume data (CT vascular and bronchial data).",
"abstracts": [
{
"abstractType": "Regular",
"content": "The growing complexity of spatial and structural information in 3D data makes data inspection and visualization a challenging task. We describe a method to create a planar embedding of 3D treelike structures using their skeleton representations. Our method maintains the original geometry, without overlaps, to the best extent possible, allowing exploration of the topology within a single view. We present a novel camera view generation method which maximizes the visible geometric attributes (segment shape and relative placement between segments). Camera views are created for individual segments and are used to determine local bending angles at each node by projecting them to 2D. The final embedding is generated by minimizing an energy function (the weights of which are user adjustable) based on branch length and the 2D angles, while avoiding intersections. The user can also interactively modify segment placement within the 2D embedding, and the overall embedding will update accordingly. A global to local interactive exploration is provided using hierarchical camera views that are created for subtrees within the structure. We evaluate our method both qualitatively and quantitatively and demonstrate our results by constructing planar visualizations of line data (traced neurons) and volume data (CT vascular and bronchial data).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The growing complexity of spatial and structural information in 3D data makes data inspection and visualization a challenging task. We describe a method to create a planar embedding of 3D treelike structures using their skeleton representations. Our method maintains the original geometry, without overlaps, to the best extent possible, allowing exploration of the topology within a single view. We present a novel camera view generation method which maximizes the visible geometric attributes (segment shape and relative placement between segments). Camera views are created for individual segments and are used to determine local bending angles at each node by projecting them to 2D. The final embedding is generated by minimizing an energy function (the weights of which are user adjustable) based on branch length and the 2D angles, while avoiding intersections. The user can also interactively modify segment placement within the 2D embedding, and the overall embedding will update accordingly. A global to local interactive exploration is provided using hierarchical camera views that are created for subtrees within the structure. We evaluate our method both qualitatively and quantitatively and demonstrate our results by constructing planar visualizations of line data (traced neurons) and volume data (CT vascular and bronchial data).",
"title": "Geometry-Aware Planar Embedding of Treelike Structures",
"normalizedTitle": "Geometry-Aware Planar Embedding of Treelike Structures",
"fno": "09721643",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Data Visualization",
"Topology",
"Shape",
"Skeleton",
"Cameras",
"Morphology",
"Geometry Based Techniques",
"Camera View Generation",
"Planar Embedding",
"Biomedical Visualization"
],
"authors": [
{
"givenName": "Ping",
"surname": "Hu",
"fullName": "Ping Hu",
"affiliation": "Computer Science, Stony Brook University, 12301 Stony Brook, New York, United States, 11794",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Saeed",
"surname": "Boorboor",
"fullName": "Saeed Boorboor",
"affiliation": "Computer Science, Stony Brook University, Stony Brook, New York, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph",
"surname": "Marino",
"fullName": "Joseph Marino",
"affiliation": "Computer Science, Stony Brook University, Port Jefferson Station, New York, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arie E.",
"surname": "Kaufman",
"fullName": "Arie E. Kaufman",
"affiliation": "Computer Science, Stony Brook University, Stony Brook, New York, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/focs/1984/0591/0/0715903",
"title": "Embedding Planar Graphs In Seven Pages",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1984/0715903/12OmNArthdg",
"parentPublication": {
"id": "proceedings/focs/1984/0591/0",
"title": "25th Annual Symposium onFoundations of Computer Science, 1984.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a516",
"title": "Graph-Constrained Surface Registration Based on Tutte Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a516/12OmNBSjISd",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2008/2357/0/04594691",
"title": "Analysis of planar mirror catadioptric stereo systems based on epipolar geometry",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2008/04594691/12OmNCwCLsz",
"parentPublication": {
"id": "proceedings/cit/2008/2357/0",
"title": "2008 8th IEEE International Conference on Computer and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206618",
"title": "Planar orientation from blur gradients in a single image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206618/12OmNwDSdie",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1994/5320/0/00287921",
"title": "Force embedding of a planar graph",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1994/00287921/12OmNxxNbUJ",
"parentPublication": {
"id": "proceedings/ssst/1994/5320/0",
"title": "Proceedings of 26th Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1999/0149/1/01491022",
"title": "Planar Catadioptric Stereo: Geometry and Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491022/12OmNzUxO9t",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2013/5135/0/5135a177",
"title": "Non-positive Curvature and the Planar Embedding Conjecture",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2013/5135a177/12OmNzlUKLb",
"parentPublication": {
"id": "proceedings/focs/2013/5135/0",
"title": "2013 IEEE 54th Annual Symposium on Foundations of Computer Science (FOCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2006/07/i1176",
"title": "Motion and Shape Recovery Based on Iterative Stabilization for Modest Deviation from Planar Motion",
"doi": null,
"abstractUrl": "/journal/tp/2006/07/i1176/13rRUILLkwr",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192698",
"title": "Planar Visualization of Treelike Structures",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192698/13rRUxd2aZ3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/02/08778782",
"title": "Absolute Pose Estimation of Central Cameras Using Planar Regions",
"doi": null,
"abstractUrl": "/journal/tp/2021/02/08778782/1fFWEan02nS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09720180",
"articleId": "1Befc7QugjS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09721695",
"articleId": "1Bhzo1K76IU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BmTjcbqSv6",
"name": "ttg555501-09721643s1-supp1-3153871.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09721643s1-supp1-3153871.mp4",
"extension": "mp4",
"size": "36.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Bhzo1K76IU",
"doi": "10.1109/TVCG.2022.3153838",
"abstract": "Depending on the node ordering, an adjacency matrix can highlight distinct characteristics of a graph. Deriving a "proper" node ordering is thus a critical step in visualizing a graph as an adjacency matrix. Users often try multiple matrix reorderings using different methods until they find one that meets the analysis goal. However, this trial-and-error approach is laborious and disorganized, which is especially challenging for novices. This paper presents a technique that enables users to effortlessly find a matrix reordering they want. Specifically, we design a generative model that learns a latent space of diverse matrix reorderings of the given graph. We also construct an intuitive user interface from the learned latent space by creating a map of various matrix reorderings. We demonstrate our approach through quantitative and qualitative evaluations of the generated reorderings and learned latent spaces. The results show that our model is capable of learning a latent space of diverse matrix reorderings. Most existing research in this area generally focused on developing algorithms that can compute "better" matrix reorderings for particular circumstances. This paper introduces a fundamentally new approach to matrix visualization of a graph, where a machine learning model learns to generate diverse matrix reorderings of a graph.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depending on the node ordering, an adjacency matrix can highlight distinct characteristics of a graph. Deriving a "proper" node ordering is thus a critical step in visualizing a graph as an adjacency matrix. Users often try multiple matrix reorderings using different methods until they find one that meets the analysis goal. However, this trial-and-error approach is laborious and disorganized, which is especially challenging for novices. This paper presents a technique that enables users to effortlessly find a matrix reordering they want. Specifically, we design a generative model that learns a latent space of diverse matrix reorderings of the given graph. We also construct an intuitive user interface from the learned latent space by creating a map of various matrix reorderings. We demonstrate our approach through quantitative and qualitative evaluations of the generated reorderings and learned latent spaces. The results show that our model is capable of learning a latent space of diverse matrix reorderings. Most existing research in this area generally focused on developing algorithms that can compute "better" matrix reorderings for particular circumstances. This paper introduces a fundamentally new approach to matrix visualization of a graph, where a machine learning model learns to generate diverse matrix reorderings of a graph.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depending on the node ordering, an adjacency matrix can highlight distinct characteristics of a graph. Deriving a \"proper\" node ordering is thus a critical step in visualizing a graph as an adjacency matrix. Users often try multiple matrix reorderings using different methods until they find one that meets the analysis goal. However, this trial-and-error approach is laborious and disorganized, which is especially challenging for novices. This paper presents a technique that enables users to effortlessly find a matrix reordering they want. Specifically, we design a generative model that learns a latent space of diverse matrix reorderings of the given graph. We also construct an intuitive user interface from the learned latent space by creating a map of various matrix reorderings. We demonstrate our approach through quantitative and qualitative evaluations of the generated reorderings and learned latent spaces. The results show that our model is capable of learning a latent space of diverse matrix reorderings. Most existing research in this area generally focused on developing algorithms that can compute \"better\" matrix reorderings for particular circumstances. This paper introduces a fundamentally new approach to matrix visualization of a graph, where a machine learning model learns to generate diverse matrix reorderings of a graph.",
"title": "A Deep Generative Model for Reordering Adjacency Matrices",
"normalizedTitle": "A Deep Generative Model for Reordering Adjacency Matrices",
"fno": "09721695",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sorting",
"Data Visualization",
"Neural Networks",
"Computational Modeling",
"Training",
"Computer Architecture",
"Stochastic Processes",
"Graph Visualization",
"Matrix Visualization",
"Machine Learning",
"Deep Generative Model",
"Visualization Interface"
],
"authors": [
{
"givenName": "Oh-Hyun",
"surname": "Kwon",
"fullName": "Oh-Hyun Kwon",
"affiliation": "Computer Science, University of California, Davis, Davis, California, United States, 95616",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chiun-How",
"surname": "Kao",
"fullName": "Chiun-How Kao",
"affiliation": "Department of Statistics, Tamkang University, 34886 Taipei, Taiwan, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chun-Houh",
"surname": "Chen",
"fullName": "Chun-Houh Chen",
"affiliation": "Academia Sinica, Institute of Statistical Science, Taipei, Taiwan, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "Computer Science, University of California at Davis, Davis, California, United States, 95616-8562",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2017/3835/0/3835a051",
"title": "Revisiting Spectral Graph Clustering with Generative Community Models",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2017/3835a051/12OmNy50g9y",
"parentPublication": {
"id": "proceedings/icdm/2017/3835/0",
"title": "2017 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acsac/2005/2461/0/24610160",
"title": "Understanding Complex Network Attack Graphs through Clustered Adjacency Matrices",
"doi": null,
"abstractUrl": "/proceedings-article/acsac/2005/24610160/12OmNyKrH6V",
"parentPublication": {
"id": "proceedings/acsac/2005/2461/0",
"title": "Computer Security Applications Conference, Annual",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122457",
"title": "Compressed Adjacency Matrices: Untangling Gene Regulatory Networks",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122457/13rRUNvyakM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08438968",
"title": "Node-Link or Adjacency Matrices: Old Question, New Insights",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08438968/13rRUwjoNx8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/01/ttp2014010048",
"title": "Consistent Latent Position Estimation and Vertex Classification for Random Dot Product Graphs",
"doi": null,
"abstractUrl": "/journal/tp/2014/01/ttp2014010048/13rRUx0ger4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020413",
"title": "Skew-Symmetric Adjacency Matrices for Clustering Directed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020413/1KfRNJ45ri0",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a201",
"title": "GraphGDP: Generative Diffusion Processes for Permutation Invariant Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a201/1KpCt19wCju",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805452",
"title": "A Deep Generative Model for Graph Layout",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805452/1cG4LsVN2zS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2019/6934/0/08909505",
"title": "Deep Dynamic Mixed Membership Stochastic Blockmodel",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2019/08909505/1febmlTSj6w",
"parentPublication": {
"id": "proceedings/wi/2019/6934/0",
"title": "2019 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a523",
"title": "A Study on the Effect of Distinct Adjacency Matrices for Graph Signal Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a523/1pBMt2iKtk4",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09721643",
"articleId": "1BhzmWFFD9K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09721603",
"articleId": "1BhzoNy6wWA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Befc7QugjS",
"doi": "10.1109/TVCG.2022.3153514",
"abstract": "Network graphs are common visualization charts. They often appear in the form of bitmaps in papers, web pages, magazine prints, and designer sketches. People often want to modify network graphs because of their poor design, but it is difficult to obtain their underlying data. In this paper, we present VividGraph, a pipeline for automatically extracting and redesigning network graphs from static images. We propose using convolutional neural networks to solve the problem of network graph data extraction. Our method is robust to hand-drawn graphs, blurred graph images, and large graph images. We also present a network graph classification module to make it effective for directed graphs. We propose two evaluation methods to demonstrate the effectiveness of our approach. It can be used to quickly transform designer sketches, extract underlying data from existing network graphs, and interactively redesign poorly designed network graphs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Network graphs are common visualization charts. They often appear in the form of bitmaps in papers, web pages, magazine prints, and designer sketches. People often want to modify network graphs because of their poor design, but it is difficult to obtain their underlying data. In this paper, we present VividGraph, a pipeline for automatically extracting and redesigning network graphs from static images. We propose using convolutional neural networks to solve the problem of network graph data extraction. Our method is robust to hand-drawn graphs, blurred graph images, and large graph images. We also present a network graph classification module to make it effective for directed graphs. We propose two evaluation methods to demonstrate the effectiveness of our approach. It can be used to quickly transform designer sketches, extract underlying data from existing network graphs, and interactively redesign poorly designed network graphs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Network graphs are common visualization charts. They often appear in the form of bitmaps in papers, web pages, magazine prints, and designer sketches. People often want to modify network graphs because of their poor design, but it is difficult to obtain their underlying data. In this paper, we present VividGraph, a pipeline for automatically extracting and redesigning network graphs from static images. We propose using convolutional neural networks to solve the problem of network graph data extraction. Our method is robust to hand-drawn graphs, blurred graph images, and large graph images. We also present a network graph classification module to make it effective for directed graphs. We propose two evaluation methods to demonstrate the effectiveness of our approach. It can be used to quickly transform designer sketches, extract underlying data from existing network graphs, and interactively redesign poorly designed network graphs.",
"title": "VividGraph: Learning to Extract and Redesign Network Graphs from Visualization Images",
"normalizedTitle": "VividGraph: Learning to Extract and Redesign Network Graphs from Visualization Images",
"fno": "09720180",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Mining",
"Semantics",
"Image Color Analysis",
"Image Segmentation",
"Data Visualization",
"Pipelines",
"Image Edge Detection",
"Information Visualization",
"Network Graph",
"Data Extraction",
"Chart Recognition",
"Semantic Segmentation",
"Redesign"
],
"authors": [
{
"givenName": "Sicheng",
"surname": "Song",
"fullName": "Sicheng Song",
"affiliation": "School of Computer Science and Technology, East China Normal University, 12655 Shanghai, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chenhui",
"surname": "Li",
"fullName": "Chenhui Li",
"affiliation": "School of Computer Science and Technology, East China Normal University, 12655 Shanghai, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yujing",
"surname": "Sun",
"fullName": "Yujing Sun",
"affiliation": "School of Computer Science and Technology, East China Normal University, 12655 Shanghai, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changbo",
"surname": "Wang",
"fullName": "Changbo Wang",
"affiliation": "School of Computer Science and Technology, East China Normal University, 12655 Shanghai, Shanghai, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2017/3800/0/3800a968",
"title": "Online Detection of Anomalous Heterogeneous Graphs with Streaming Edges",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2017/3800a968/12OmNCxL9QI",
"parentPublication": {
"id": "proceedings/icdmw/2017/3800/0",
"title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596126",
"title": "Smooth bundling of large streaming and sequence graphs",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596126/12OmNscfI0r",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720b071",
"title": "Video Summarization via Segments Summary Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720b071/12OmNvxbhKS",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2013/3142/0/3143a521",
"title": "Incremental Anomaly Detection in Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2013/3143a521/12OmNxE2mRX",
"parentPublication": {
"id": "proceedings/icdmw/2013/3142/0",
"title": "2013 IEEE 13th International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icppw/2014/5615/0/5615a149",
"title": "Flow Graph Designer: A Tool for Designing and Analyzing Intel® Threading Building Blocks Flow Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icppw/2014/5615a149/12OmNyQGRYJ",
"parentPublication": {
"id": "proceedings/icppw/2014/5615/0",
"title": "2014 43nd International Conference on Parallel Processing Workshops (ICCPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispdc/2016/4152/0/07904268",
"title": "Finding SCCs in Real-World Graphs on External Memory: A Task-Based Approach",
"doi": null,
"abstractUrl": "/proceedings-article/ispdc/2016/07904268/12OmNzdoMP9",
"parentPublication": {
"id": "proceedings/ispdc/2016/4152/0",
"title": "2016 15th International Symposium on Parallel and Distributed Computing (ISPDC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/06/08617746",
"title": "Graphoto: Aesthetically Pleasing Charts for Casual Information Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2018/06/08617746/17D45Xbl4Oc",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800a219",
"title": "MinerLSD: Efficient Local Pattern Mining on Attributed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800a219/18jXy2pmyR2",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09966829",
"title": "GraphDecoder: Recovering Diverse Network Graphs from Visualization Images via Attention-Aware Learning",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09966829/1IIYlkz8kkE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09720214",
"articleId": "1BefbMXPO3C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09721643",
"articleId": "1BhzmWFFD9K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BfU9JIY45W",
"name": "ttg555501-09720180s1-supp2-3153514.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09720180s1-supp2-3153514.mp4",
"extension": "mp4",
"size": "91.5 MB",
"__typename": "WebExtraType"
},
{
"id": "1BfU9WuDjRS",
"name": "ttg555501-09720180s1-supp1-3153514.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09720180s1-supp1-3153514.pdf",
"extension": "pdf",
"size": "2.31 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BefbMXPO3C",
"doi": "10.1109/TVCG.2022.3153501",
"abstract": "In this paper, we present TeethGNN, a novel 3D tooth segmentation method based on graph neural networks (GNNs). Given a mesh-represented 3D dental model in non-Euclidean domain, our method outputs accurate and fine-grained separation of each individual tooth robust to scanning noise, foreign matters (e.g., bubbles, dental accessories, etc.), and even severe malocclusion. Unlike previous CNN-based methods that bypass handling non-Euclidean mesh data by reshaping hand-crafted geometric features into regular grids, we explore the non-uniform and irregular structure of mesh itself in its dual space and exploit graph neural networks for effective geometric feature learning. To address the crowded teeth issues and incomplete segmentation that commonly exist in previous methods, we design a two-branch network, one of which predicts a segmentation label for each facet while the other regresses each facet an offset away from its tooth centroid. Clustering are later conducted on offset-shifted locations, enabling both the separation of adjoining teeth and the adjustment of incompletely segmented teeth. Exploiting GNN for directly processing mesh data frees us from extracting hand-crafted feature, and largely speeds up the inference procedure. Extensive experiments have shown that our method achieves the new state-of-the-art results for teeth segmentation and outperforms previous methods both quantitatively and qualitatively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present TeethGNN, a novel 3D tooth segmentation method based on graph neural networks (GNNs). Given a mesh-represented 3D dental model in non-Euclidean domain, our method outputs accurate and fine-grained separation of each individual tooth robust to scanning noise, foreign matters (e.g., bubbles, dental accessories, etc.), and even severe malocclusion. Unlike previous CNN-based methods that bypass handling non-Euclidean mesh data by reshaping hand-crafted geometric features into regular grids, we explore the non-uniform and irregular structure of mesh itself in its dual space and exploit graph neural networks for effective geometric feature learning. To address the crowded teeth issues and incomplete segmentation that commonly exist in previous methods, we design a two-branch network, one of which predicts a segmentation label for each facet while the other regresses each facet an offset away from its tooth centroid. Clustering are later conducted on offset-shifted locations, enabling both the separation of adjoining teeth and the adjustment of incompletely segmented teeth. Exploiting GNN for directly processing mesh data frees us from extracting hand-crafted feature, and largely speeds up the inference procedure. Extensive experiments have shown that our method achieves the new state-of-the-art results for teeth segmentation and outperforms previous methods both quantitatively and qualitatively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present TeethGNN, a novel 3D tooth segmentation method based on graph neural networks (GNNs). Given a mesh-represented 3D dental model in non-Euclidean domain, our method outputs accurate and fine-grained separation of each individual tooth robust to scanning noise, foreign matters (e.g., bubbles, dental accessories, etc.), and even severe malocclusion. Unlike previous CNN-based methods that bypass handling non-Euclidean mesh data by reshaping hand-crafted geometric features into regular grids, we explore the non-uniform and irregular structure of mesh itself in its dual space and exploit graph neural networks for effective geometric feature learning. To address the crowded teeth issues and incomplete segmentation that commonly exist in previous methods, we design a two-branch network, one of which predicts a segmentation label for each facet while the other regresses each facet an offset away from its tooth centroid. Clustering are later conducted on offset-shifted locations, enabling both the separation of adjoining teeth and the adjustment of incompletely segmented teeth. Exploiting GNN for directly processing mesh data frees us from extracting hand-crafted feature, and largely speeds up the inference procedure. Extensive experiments have shown that our method achieves the new state-of-the-art results for teeth segmentation and outperforms previous methods both quantitatively and qualitatively.",
"title": "TeethGNN: Semantic 3D Teeth Segmentation with Graph Neural Networks",
"normalizedTitle": "TeethGNN: Semantic 3D Teeth Segmentation with Graph Neural Networks",
"fno": "09720214",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Teeth",
"Feature Extraction",
"Three Dimensional Displays",
"Semantics",
"Image Segmentation",
"Deep Learning",
"Representation Learning",
"3 D Teeth Segmentation",
"Graph Neural Network",
"Geometric Deep Learning",
"Clustering"
],
"authors": [
{
"givenName": "Youyi",
"surname": "Zheng",
"fullName": "Youyi Zheng",
"affiliation": "Computer Science, Zhejiang University College of Computer Science and Technology, 366095 Hangzhou, Zhejiang, China, 310027",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Beijia",
"surname": "Chen",
"fullName": "Beijia Chen",
"affiliation": "The State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuefan",
"surname": "Shen",
"fullName": "Yuefan Shen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kaidi",
"surname": "Shen",
"fullName": "Kaidi Shen",
"affiliation": "AI Lab, Hangzhou Zoho Information Technology Co. Ltd., Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2012/4875/0/4875a092",
"title": "Automatic Classification of Teeth in Bitewing Dental Images Using OLPP",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a092/12OmNyr8YlK",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2012/4875/0/4875a145",
"title": "A New Approach to Teeth Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a145/12OmNzlUKwe",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08362667",
"title": "3D Tooth Segmentation and Labeling Using Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08362667/13rRUEgs2C4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0720",
"title": "DArch: Dental Arch Prior-assisted 3D Tooth Instance Segmentation with Weak Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0720/1H1kFKjFl16",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956708",
"title": "Automatic teeth segmentation on panoramic X-rays using deep neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956708/1IHqmXCw89O",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995297",
"title": "LeFUNet: UNet with Learnable Feature Connections for Teeth Identification and Segmentation in Dental Panoramic X-ray Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995297/1JC3u99qicM",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f583",
"title": "Self-Supervised Learning with Masked Image Modeling for Teeth Numbering, Detection of Dental Restorations, and Instance Segmentation in Dental Panoramic Radiographs",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f583/1L8qxxnly2k",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2020/9274/0/927400a164",
"title": "A study on tooth segmentation and numbering using end-to-end deep neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2020/927400a164/1p2VzkB4pji",
"parentPublication": {
"id": "proceedings/sibgrapi/2020/9274/0",
"title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismvl/2020/5406/0/540600a016",
"title": "Automatic Teeth Recognition in Dental X-Ray Images Using Transfer Learning Based Faster R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/ismvl/2020/540600a016/1qciaA2XbHy",
"parentPublication": {
"id": "proceedings/ismvl/2020/5406/0",
"title": "2020 IEEE 50th International Symposium on Multiple-Valued Logic (ISMVL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09445658",
"title": "A Fully Automated Method for 3D Individual Tooth Identification and Segmentation in Dental CBCT",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09445658/1uaajNYaeQw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09716779",
"articleId": "1B5WCvEX76E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09720180",
"articleId": "1Befc7QugjS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1B5WCvEX76E",
"doi": "10.1109/TVCG.2022.3152450",
"abstract": "To reduce the number of pending cases and conflicting rulings in the Brazilian Judiciary, the National Congress amended the Constitution, allowing the Brazilian Supreme Court (STF) to create binding precedents (BPs), i.e., a set of understandings that both Executive and lower Judiciary branches must follow. The STF's justices frequently cite the 58 existing BPs in their decisions, and it is of primary relevance that judicial experts could identify and analyze such citations. To assist in this problem, we propose LegalVis, a web-based visual analytics system designed to support the analysis of legal documents that cite or could potentially cite a BP. We model the problem of identifying potential citations (i.e., non-explicit) as a classification problem. However, a simple score is not enough to explain the results; that is why we use an interpretability machine learning method to explain the reason behind each identified citation. For a compelling visual exploration of documents and BPs, LegalVis comprises three interactive visual components: the first presents an overview of the data showing temporal patterns, the second allows filtering and grouping relevant documents by topic, and the last one shows a document's text aiming to interpret the model's output by pointing out which paragraphs are likely to mention the BP, even if not explicitly specified. We evaluated our identification model and obtained an accuracy of 96%; we also made a quantitative and qualitative analysis of the results. The usefulness and effectiveness of LegalVis were evaluated through two usage scenarios and feedback from six domain experts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To reduce the number of pending cases and conflicting rulings in the Brazilian Judiciary, the National Congress amended the Constitution, allowing the Brazilian Supreme Court (STF) to create binding precedents (BPs), i.e., a set of understandings that both Executive and lower Judiciary branches must follow. The STF's justices frequently cite the 58 existing BPs in their decisions, and it is of primary relevance that judicial experts could identify and analyze such citations. To assist in this problem, we propose LegalVis, a web-based visual analytics system designed to support the analysis of legal documents that cite or could potentially cite a BP. We model the problem of identifying potential citations (i.e., non-explicit) as a classification problem. However, a simple score is not enough to explain the results; that is why we use an interpretability machine learning method to explain the reason behind each identified citation. For a compelling visual exploration of documents and BPs, LegalVis comprises three interactive visual components: the first presents an overview of the data showing temporal patterns, the second allows filtering and grouping relevant documents by topic, and the last one shows a document's text aiming to interpret the model's output by pointing out which paragraphs are likely to mention the BP, even if not explicitly specified. We evaluated our identification model and obtained an accuracy of 96%; we also made a quantitative and qualitative analysis of the results. The usefulness and effectiveness of LegalVis were evaluated through two usage scenarios and feedback from six domain experts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To reduce the number of pending cases and conflicting rulings in the Brazilian Judiciary, the National Congress amended the Constitution, allowing the Brazilian Supreme Court (STF) to create binding precedents (BPs), i.e., a set of understandings that both Executive and lower Judiciary branches must follow. The STF's justices frequently cite the 58 existing BPs in their decisions, and it is of primary relevance that judicial experts could identify and analyze such citations. To assist in this problem, we propose LegalVis, a web-based visual analytics system designed to support the analysis of legal documents that cite or could potentially cite a BP. We model the problem of identifying potential citations (i.e., non-explicit) as a classification problem. However, a simple score is not enough to explain the results; that is why we use an interpretability machine learning method to explain the reason behind each identified citation. For a compelling visual exploration of documents and BPs, LegalVis comprises three interactive visual components: the first presents an overview of the data showing temporal patterns, the second allows filtering and grouping relevant documents by topic, and the last one shows a document's text aiming to interpret the model's output by pointing out which paragraphs are likely to mention the BP, even if not explicitly specified. We evaluated our identification model and obtained an accuracy of 96%; we also made a quantitative and qualitative analysis of the results. The usefulness and effectiveness of LegalVis were evaluated through two usage scenarios and feedback from six domain experts.",
"title": "LegalVis: Exploring and Inferring Precedent Citations in Legal Documents",
"normalizedTitle": "LegalVis: Exploring and Inferring Precedent Citations in Legal Documents",
"fno": "09716779",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Law",
"Data Visualization",
"Visual Analytics",
"Task Analysis",
"Text Analysis",
"Analytical Models",
"Natural Language Processing",
"Legal Documents",
"Visual Analytics",
"Brazilian Legal System",
"Natural Language Processing"
],
"authors": [
{
"givenName": "Lucas Emanuel",
"surname": "Resck",
"fullName": "Lucas Emanuel Resck",
"affiliation": "School of Applied Mathematics, Fundacao Getulio Vargas, 42500 Rio de Janeiro, RJ, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean R.",
"surname": "Ponciano",
"fullName": "Jean R. Ponciano",
"affiliation": "School of Applied Mathematics, Fundacao Getulio Vargas, 42500 Rio de Janeiro, RJ, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luis Gustavo",
"surname": "Nonato",
"fullName": "Luis Gustavo Nonato",
          "affiliation": "Computação e Estatística, Universidade de São Paulo, São Carlos, São Paulo, Brazil, 13560-970",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorge",
"surname": "Poco",
"fullName": "Jorge Poco",
"affiliation": "School of Applied Mathematics, Fundacao Getulio Vargas, 42500 Rio de Janeiro, Rio de Janeiro, Brazil, 22250-900",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/jcdl/2006/354/0/04119100",
"title": "A comparative study of citations and links in document classification",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2006/04119100/12OmNviZljW",
"parentPublication": {
"id": "proceedings/jcdl/2006/354/0",
"title": "2006 IEEE/ACM 6th Joint Conference on Digital Libraries",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2016/4795/0/4795a054",
"title": "Summarization of Related Work through Citations",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2016/4795a054/12OmNvjgWuo",
"parentPublication": {
"id": "proceedings/skg/2016/4795/0",
"title": "2016 12th International Conference on Semantics, Knowledge and Grids (SKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2016/4229/0/07559580",
"title": "Predicting medical subject headings based on abstract similarity and citations to MEDLINE records",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2016/07559580/12OmNx19jWP",
"parentPublication": {
"id": "proceedings/jcdl/2016/4229/0",
"title": "2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2015/9325/0/9325a288",
"title": "Interactive Analytic Systems for Understanding the Scholarly Impact of Large-Scale E-science Cyberenvironments",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a288/12OmNxiKscb",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2017/3077/0/08284324",
"title": "Approaches for information retrieval in legal documents",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2017/08284324/12OmNzBwGop",
"parentPublication": {
"id": "proceedings/ic3/2017/3077/0",
"title": "2017 Tenth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a241",
"title": "Cartographies of the Legal World. Rise and Challenges of Visual Legal Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a241/17D45WrVg8T",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/istm/2022/7116/0/711600a007",
"title": "Interpretable Text Classification in Legal Contract Documents using Tsetlin Machines",
"doi": null,
"abstractUrl": "/proceedings-article/istm/2022/711600a007/1HJzGIZp65G",
"parentPublication": {
"id": "proceedings/istm/2022/7116/0",
"title": "2022 International Symposium on the Tsetlin Machine (ISTM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2019/1547/0/154700a120",
"title": "Improving Academic Plagiarism Detection for STEM Documents by Analyzing Mathematical Content and Citations",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2019/154700a120/1ckrHKT4bgA",
"parentPublication": {
"id": "proceedings/jcdl/2019/1547/0",
"title": "2019 ACM/IEEE Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377970",
"title": "A bibliometric network analysis of Deep Learning publications applied into legal documents",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377970/1s64Bs1mh6E",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378182",
"title": "CNN Application in Detection of Privileged Documents in Legal Document Review",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378182/1s64SyTIVAk",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09715049",
"articleId": "1B2DbhImWwE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09720214",
"articleId": "1BefbMXPO3C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BaVNk4yHg4",
"name": "ttg555501-09716779s1-supp2-3152450.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09716779s1-supp2-3152450.mp4",
"extension": "mp4",
"size": "15.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1BaVNqhsLDy",
"name": "ttg555501-09716779s1-supp1-3152450.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09716779s1-supp1-3152450.pdf",
"extension": "pdf",
"size": "1.95 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1B2DbhImWwE",
"doi": "10.1109/TVCG.2021.3135764",
"abstract": "Crepuscular rays form when light encounters an optically thick or opaque medium which masks out portions of the visible scene. Real-time applications commonly estimate this phenomena by connecting paths between light sources and the camera after a single scattering event. We provide a set of algorithms for solving integration and sampling of single-scattered collimated light in a box-shaped medium and show how they extend to multiple scattering and convex media. First, a method for exactly integrating the unoccluded single scattering in rectilinear box-shaped medium is proposed and paired with a ratio estimator and moment-based approximation. Compared to previous methods, it requires only a single sample in unoccluded areas to compute the whole integral solution and provides greater convergence in the rest of the scene. Second, we derive an importance sampling scheme accounting for the entire geometry of the medium. This sampling strategy is then incorporated in an optimized Monte Carlo integration. The resulting integration scheme yields visible noise reduction and it is directly applicable to indoor scene rendering in room-scale interactive experiences. Furthermore, it extends to multiple light sources and achieves superior converge compared to independent sampling with existing algorithms. We validate our techniques against previous methods based on ray marching and distance sampling to prove their superior noise reduction capability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Crepuscular rays form when light encounters an optically thick or opaque medium which masks out portions of the visible scene. Real-time applications commonly estimate this phenomena by connecting paths between light sources and the camera after a single scattering event. We provide a set of algorithms for solving integration and sampling of single-scattered collimated light in a box-shaped medium and show how they extend to multiple scattering and convex media. First, a method for exactly integrating the unoccluded single scattering in rectilinear box-shaped medium is proposed and paired with a ratio estimator and moment-based approximation. Compared to previous methods, it requires only a single sample in unoccluded areas to compute the whole integral solution and provides greater convergence in the rest of the scene. Second, we derive an importance sampling scheme accounting for the entire geometry of the medium. This sampling strategy is then incorporated in an optimized Monte Carlo integration. The resulting integration scheme yields visible noise reduction and it is directly applicable to indoor scene rendering in room-scale interactive experiences. Furthermore, it extends to multiple light sources and achieves superior converge compared to independent sampling with existing algorithms. We validate our techniques against previous methods based on ray marching and distance sampling to prove their superior noise reduction capability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Crepuscular rays form when light encounters an optically thick or opaque medium which masks out portions of the visible scene. Real-time applications commonly estimate this phenomena by connecting paths between light sources and the camera after a single scattering event. We provide a set of algorithms for solving integration and sampling of single-scattered collimated light in a box-shaped medium and show how they extend to multiple scattering and convex media. First, a method for exactly integrating the unoccluded single scattering in rectilinear box-shaped medium is proposed and paired with a ratio estimator and moment-based approximation. Compared to previous methods, it requires only a single sample in unoccluded areas to compute the whole integral solution and provides greater convergence in the rest of the scene. Second, we derive an importance sampling scheme accounting for the entire geometry of the medium. This sampling strategy is then incorporated in an optimized Monte Carlo integration. The resulting integration scheme yields visible noise reduction and it is directly applicable to indoor scene rendering in room-scale interactive experiences. Furthermore, it extends to multiple light sources and achieves superior converge compared to independent sampling with existing algorithms. We validate our techniques against previous methods based on ray marching and distance sampling to prove their superior noise reduction capability.",
"title": "Collimated Whole Volume Light Scattering in Homogeneous Finite Media",
"normalizedTitle": "Collimated Whole Volume Light Scattering in Homogeneous Finite Media",
"fno": "09715049",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Geometry",
"Cameras",
"Scattering",
"Rendering Computer Graphics",
"Real Time Systems",
"Monte Carlo Methods",
"Light Sources",
"Raytracing",
"Color",
"Shading",
"Shadowing",
"Texture"
],
"authors": [
{
"givenName": "Zdravko Venchev",
"surname": "Velinov",
"fullName": "Zdravko Venchev Velinov",
"affiliation": "DRLA, Walt Disney Imagineering, 115140 Glendale, California, United States, 91221-5020 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kenny",
"surname": "Mitchell",
"fullName": "Kenny Mitchell",
"affiliation": "Home, Disney Research, San Jose, California, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391d415",
"title": "Photometric Stereo in a Scattering Medium",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d415/12OmNBV9Igo",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209e382",
"title": "Light Transport Refocusing for Unknown Scattering Medium",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209e382/12OmNqzu6Nb",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567441",
"title": "Quantum optics in multiple scattering random media",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567441/12OmNyGtjoY",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a093",
"title": "Multiple-Scattering Optical Tomography with Layered Material",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a093/12OmNzRZpYR",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/1990/2056/0/00130087",
"title": "Massively parallel computational methods in light scattering by small particles",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1990/00130087/12OmNzmclKg",
"parentPublication": {
"id": "proceedings/sc/1990/2056/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0342",
"title": "Light Scattering from Filaments",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0342/13rRUwI5TXt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/09/07577857",
"title": "Photometric Stereo in a Scattering Medium",
"doi": null,
"abstractUrl": "/journal/tp/2017/09/07577857/13rRUxYIMWv",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/03/mcg2013030053",
"title": "Real-Time Screen-Space Scattering in Homogeneous Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2013/03/mcg2013030053/13rRUygT7cK",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08600345",
"title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a451",
"title": "Markov Chain and Monte Carlo Predictions for Light Multiple Scattering Applications",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a451/1hHLsCdHDWw",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09714170",
"articleId": "1B0Y0ltSIfK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09716779",
"articleId": "1B5WCvEX76E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1B5WDlG5uZG",
"name": "ttg555501-09715049s1-tvcg-3135764-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09715049s1-tvcg-3135764-mm.zip",
"extension": "zip",
"size": "65.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1B0Y0ltSIfK",
"doi": "10.1109/TVCG.2022.3151617",
"abstract": "During the creation of graphic designs, individuals inevitably spend a lot of time and effort on adjusting visual attributes (e.g., positions, colors, and fonts) of elements to make them more aesthetically pleasing. It is a trial-and-error process, requires repetitive edits, and relies on good design knowledge. In this work, we seek to alleviate such difficulty by automatically suggesting aesthetic improvements, i.e., taking an existing design as the input and generating a refined version with improved aesthetic quality as the output. This goal presents two challenges: proposing a refined design based on the user-given one, and assessing whether the new design is better aesthetically. To cope with these challenges, we propose a design principle-guided candidate generation stage and a data-driven candidate evaluation stage. In the candidate generation stage, we generate candidate designs by leveraging design principles as the guidance to make changes around the existing design. In the candidate evaluation stage, we learn a ranking model upon a dataset that can reflect humans aesthetic preference, and use it to choose the most aesthetically pleasing one from the generated candidates. We implement a prototype system on presentation slides and demonstrate the effectiveness of our approach through quantitative analysis, sample results, and user studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "During the creation of graphic designs, individuals inevitably spend a lot of time and effort on adjusting visual attributes (e.g., positions, colors, and fonts) of elements to make them more aesthetically pleasing. It is a trial-and-error process, requires repetitive edits, and relies on good design knowledge. In this work, we seek to alleviate such difficulty by automatically suggesting aesthetic improvements, i.e., taking an existing design as the input and generating a refined version with improved aesthetic quality as the output. This goal presents two challenges: proposing a refined design based on the user-given one, and assessing whether the new design is better aesthetically. To cope with these challenges, we propose a design principle-guided candidate generation stage and a data-driven candidate evaluation stage. In the candidate generation stage, we generate candidate designs by leveraging design principles as the guidance to make changes around the existing design. In the candidate evaluation stage, we learn a ranking model upon a dataset that can reflect humans aesthetic preference, and use it to choose the most aesthetically pleasing one from the generated candidates. We implement a prototype system on presentation slides and demonstrate the effectiveness of our approach through quantitative analysis, sample results, and user studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "During the creation of graphic designs, individuals inevitably spend a lot of time and effort on adjusting visual attributes (e.g., positions, colors, and fonts) of elements to make them more aesthetically pleasing. It is a trial-and-error process, requires repetitive edits, and relies on good design knowledge. In this work, we seek to alleviate such difficulty by automatically suggesting aesthetic improvements, i.e., taking an existing design as the input and generating a refined version with improved aesthetic quality as the output. This goal presents two challenges: proposing a refined design based on the user-given one, and assessing whether the new design is better aesthetically. To cope with these challenges, we propose a design principle-guided candidate generation stage and a data-driven candidate evaluation stage. In the candidate generation stage, we generate candidate designs by leveraging design principles as the guidance to make changes around the existing design. In the candidate evaluation stage, we learn a ranking model upon a dataset that can reflect humans' aesthetic preference, and use it to choose the most aesthetically pleasing one from the generated candidates. We implement a prototype system on presentation slides and demonstrate the effectiveness of our approach through quantitative analysis, sample results, and user studies.",
"title": "Aesthetics++: Refining Graphic Designs by Exploring Design Principles and Human Preference",
"normalizedTitle": "Aesthetics++: Refining Graphic Designs by Exploring Design Principles and Human Preference",
"fno": "09714170",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Automatic Refinement Suggestion",
"Design Principles",
"Data Driven Approach",
"Aesthetic Quality"
],
"authors": [
{
"givenName": "Wenyuan",
"surname": "Kong",
"fullName": "Wenyuan Kong",
"affiliation": "School of Earth and Space Sciences, Peking University, 12465 Beijing, Beijing, China, 100871",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhaoyun",
"surname": "Jiang",
"fullName": "Zhaoyun Jiang",
"affiliation": "School of Software Engineering, Xi'an Jiaotong University, 12480 Xi'an, Shaanxi, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shizhao",
"surname": "Sun",
"fullName": "Shizhao Sun",
"affiliation": "Software Analytics, Microsoft Research Asia, 216064 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhuoning",
"surname": "Guo",
"fullName": "Zhuoning Guo",
"affiliation": "Faculty of Computing, Harbin Institute of Technology, 47822 Harbin, Heilongjiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weiwei",
"surname": "Cui",
"fullName": "Weiwei Cui",
"affiliation": "Internet Graphics, Microsoft Research Asia, 216064 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ting",
"surname": "Liu",
"fullName": "Ting Liu",
"affiliation": "School of Cyber Science and Engineering, Xi'an Jiaotong University, 12480 Xi'an, Shaanxi, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian-Guang",
"surname": "Lou",
"fullName": "Jian-Guang Lou",
"affiliation": "Software Analytics, Microsoft Research Asia, 216064 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongmei",
"surname": "Zhang",
"fullName": "Dongmei Zhang",
"affiliation": "Software Analytics, Microsoft Research Asia, 216064 Beijing, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/nicoint/2016/2305/0/2305a143",
"title": "Using Color to Improve the Discrimination and Aesthetics of Treemaps",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2016/2305a143/12OmNrYlmIZ",
"parentPublication": {
"id": "proceedings/nicoint/2016/2305/0",
"title": "2016 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/1995/3022/1/00483106",
"title": "Technological allusivity: appreciating and teaching the role of aesthetics in engineering design",
"doi": null,
"abstractUrl": "/proceedings-article/fie/1995/00483106/12OmNxuo0kE",
"parentPublication": {
"id": "proceedings/fie/1995/3022/1",
"title": "Proceedings Frontiers in Education 1995 25th Annual Conference. Engineering Education for the 21st Century",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995467",
"title": "High level describable attributes for predicting aesthetics and interestingness",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995467/12OmNxwncnj",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521353",
"title": "Injection, detection and repair of aesthetics in home movies",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521353/12OmNy7yEio",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/07/08365844",
"title": "A Deep Network Solution for Attention and Aesthetics Aware Photo Cropping",
"doi": null,
"abstractUrl": "/journal/tp/2019/07/08365844/13rRUypp58W",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2018/8308/0/830800a091",
"title": "Analysis of Multidimensional Representation of Visual Aesthetic Elements in Graphic Design",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2018/830800a091/17D45WnnFWB",
"parentPublication": {
"id": "proceedings/icicta/2018/8308/0",
"title": "2018 11th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956162",
"title": "Image Aesthetics Assessment Using Graph Attention Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956162/1IHoPw01W9O",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d333",
"title": "PFAGAN: An Aesthetics-Conditional GAN for Generating Photographic Fine Art",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d333/1i5muBeiyGs",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a214",
"title": "Investigating aesthetics to afford more ‘felt’ knowledge and ‘meaningful’ navigation interface designs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a214/1rSR8vAIb9C",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2021/6834/0/683400a770",
"title": "Analysis of Ceramic Design Aesthetics under Computer Aided Design",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2021/683400a770/1vmLD98uOqY",
"parentPublication": {
"id": "proceedings/icekim/2021/6834/0",
"title": "2021 2nd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09707648",
"articleId": "1APlBguxdSw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09715049",
"articleId": "1B2DbhImWwE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BefcLG0rXa",
"name": "ttg555501-09714170s1-supp1-3151617.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09714170s1-supp1-3151617.pdf",
"extension": "pdf",
"size": "18.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AO2jecLFgQ",
"doi": "10.1109/TVCG.2022.3148419",
"abstract": "This article discusses the depth range which automultiscopic 3D (A3D) displays should reproduce for ensuring an adequate perceptual quality of substantially deep scenes. These displays usually need sufficient depth reconstruction capabilities covering the whole scene depth, but due to the inherent hardware restriction of these displays this is often difficult. Previous studies have addressed this limitation by depth compression that contracts the scene depth into a smaller depth range. The previous results showed that reconstructing only a physical depth of 1 m is needed to show scenes with much deeper depth and without large perceptual quality degradation. However, reconstructing a depth of 1 m is still challenging for actual A3D displays. In this study, focusing on a personal viewing situation, we introduce a dynamic depth compression that combines viewpoint tracking with the previous approach and examines the extent to which scene depths can be compressed while keeping the original perceptual quality. We performed an experiment with an A3D display simulator and found that a depth of just 10 cm was sufficient for showing deep scenes without inducing a feeling of unnaturalness. Next, we investigated whether the simulation results were valid even on a real A3D display and found that the dynamic approach induced better perceptual quality than the static one even on the real A3D display and that it had a depth enhancing effect without any hardware updates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This article discusses the depth range which automultiscopic 3D (A3D) displays should reproduce for ensuring an adequate perceptual quality of substantially deep scenes. These displays usually need sufficient depth reconstruction capabilities covering the whole scene depth, but due to the inherent hardware restriction of these displays this is often difficult. Previous studies have addressed this limitation by depth compression that contracts the scene depth into a smaller depth range. The previous results showed that reconstructing only a physical depth of 1 m is needed to show scenes with much deeper depth and without large perceptual quality degradation. However, reconstructing a depth of 1 m is still challenging for actual A3D displays. In this study, focusing on a personal viewing situation, we introduce a dynamic depth compression that combines viewpoint tracking with the previous approach and examines the extent to which scene depths can be compressed while keeping the original perceptual quality. We performed an experiment with an A3D display simulator and found that a depth of just 10 cm was sufficient for showing deep scenes without inducing a feeling of unnaturalness. Next, we investigated whether the simulation results were valid even on a real A3D display and found that the dynamic approach induced better perceptual quality than the static one even on the real A3D display and that it had a depth enhancing effect without any hardware updates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This article discusses the depth range which automultiscopic 3D (A3D) displays should reproduce for ensuring an adequate perceptual quality of substantially deep scenes. These displays usually need sufficient depth reconstruction capabilities covering the whole scene depth, but due to the inherent hardware restriction of these displays this is often difficult. Previous studies have addressed this limitation by depth compression that contracts the scene depth into a smaller depth range. The previous results showed that reconstructing only a physical depth of 1 m is needed to show scenes with much deeper depth and without large perceptual quality degradation. However, reconstructing a depth of 1 m is still challenging for actual A3D displays. In this study, focusing on a personal viewing situation, we introduce a dynamic depth compression that combines viewpoint tracking with the previous approach and examines the extent to which scene depths can be compressed while keeping the original perceptual quality. We performed an experiment with an A3D display simulator and found that a depth of just 10 cm was sufficient for showing deep scenes without inducing a feeling of unnaturalness. Next, we investigated whether the simulation results were valid even on a real A3D display and found that the dynamic approach induced better perceptual quality than the static one even on the real A3D display and that it had a depth enhancing effect without any hardware updates.",
"title": "Perceptual Assessment of Image and Depth Quality of Dynamically Depth-compressed Scene for Automultiscopic 3D Display",
"normalizedTitle": "Perceptual Assessment of Image and Depth Quality of Dynamically Depth-compressed Scene for Automultiscopic 3D Display",
"fno": "09706291",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Image Reconstruction",
"Visualization",
"Stereo Image Processing",
"Geometry",
"Tracking",
"Real Time Systems",
"Compression Technologies",
"Depth Cues",
"Perception And Psychophysics",
"Volumetric"
],
"authors": [
{
"givenName": "Yamato",
"surname": "Miyashita",
"fullName": "Yamato Miyashita",
"affiliation": "Science and Technology Research Laboratories, Japan Broadcasting Corp, 13488 Kinuta, Setagaya-ku, Tokyo, Japan, 150-8001 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yasuhito",
"surname": "Sawahata",
"fullName": "Yasuhito Sawahata",
"affiliation": "Science and Technology Research Laboratories, Japan Broadcasting Corp, 13488 Kinuta, Setagaya-ku, Tokyo, Japan, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kazuteru",
"surname": "Komine",
"fullName": "Kazuteru Komine",
"affiliation": "Science and Technology Research Laboratories, Japan Broadcasting Corp, 13488 Kinuta, Setagaya-ku, Tokyo, Japan, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948413",
"title": "A study of depth perception in hand-held augmented reality using autostereoscopic displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948413/12OmNAoUTpO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019542",
"title": "Quality assessment of multi-view-plus-depth images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019542/12OmNBfZSmq",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937507",
"title": "Efficient dense depth estimation from dense multiperspective panoramas",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937507/12OmNqzcvNb",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238907",
"title": "3D display size matters: Compensating for the perceptual effects of S3D display scaling",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238907/12OmNrJAdMH",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460252",
"title": "Depth-map merging for Multi-View Stereo with high resolution images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460252/12OmNwNwzMv",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/1/07501295",
"title": "Integration of Perceptual Grouping and Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07501295/12OmNx76TQ1",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836470",
"title": "A Hand-Held, Self-Contained Simulated Transparent Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836470/12OmNxy4N38",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/is3c/2016/3071/0/3071a283",
"title": "A Study of Effects of Perceptual Cues on Presence for the Elderly in 3D Virtual Store",
"doi": null,
"abstractUrl": "/proceedings-article/is3c/2016/3071a283/12OmNzUPpta",
"parentPublication": {
"id": "proceedings/is3c/2016/3071/0",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c031",
"title": "Salience Guided Depth Calibration for Perceptually Optimized Compressive Light Field 3D Display",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c031/17D45VsBTZA",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09706326",
"articleId": "1AO2j4ICNLa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09707648",
"articleId": "1APlBguxdSw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1APlCk8HrZC",
"name": "ttg555501-09706291s1-supp1-3148419.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09706291s1-supp1-3148419.mp4",
"extension": "mp4",
"size": "49.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AO2j4ICNLa",
"doi": "10.1109/TVCG.2022.3148745",
"abstract": "We explore an online reinforcement learning (RL) paradigm to dynamically optimize parallel particle tracing performance in distributed-memory systems. Our method combines three novel components: (1) a work donation algorithm, (2) a high-order workload estimation model, and (3) a communication cost model. First, we design an RL-based work donation algorithm. Our algorithm monitors workloads of processes and creates RL agents to donate data blocks and particles from high-workload processes to low-workload processes to minimize program execution time. The agents learn the donation strategy on the fly based on reward and cost functions designed to consider processes' workload changes and data transfer costs of donation actions. Second, we propose a workload estimation model, helping RL agents estimate the workload distribution of processes in future computations. Third, we design a communication cost model that considers both block and particle data exchange costs, helping RL agents make effective decisions with minimized communication costs. We demonstrate that our algorithm adapts to different flow behaviors in large-scale fluid dynamics, ocean, and weather simulation data. Our algorithm improves parallel particle tracing performance in terms of parallel efficiency, load balance, and costs of I/O and communication for evaluations with up to 16,384 processors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We explore an online reinforcement learning (RL) paradigm to dynamically optimize parallel particle tracing performance in distributed-memory systems. Our method combines three novel components: (1) a work donation algorithm, (2) a high-order workload estimation model, and (3) a communication cost model. First, we design an RL-based work donation algorithm. Our algorithm monitors workloads of processes and creates RL agents to donate data blocks and particles from high-workload processes to low-workload processes to minimize program execution time. The agents learn the donation strategy on the fly based on reward and cost functions designed to consider processes' workload changes and data transfer costs of donation actions. Second, we propose a workload estimation model, helping RL agents estimate the workload distribution of processes in future computations. Third, we design a communication cost model that considers both block and particle data exchange costs, helping RL agents make effective decisions with minimized communication costs. We demonstrate that our algorithm adapts to different flow behaviors in large-scale fluid dynamics, ocean, and weather simulation data. Our algorithm improves parallel particle tracing performance in terms of parallel efficiency, load balance, and costs of I/O and communication for evaluations with up to 16,384 processors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We explore an online reinforcement learning (RL) paradigm to dynamically optimize parallel particle tracing performance in distributed-memory systems. Our method combines three novel components: (1) a work donation algorithm, (2) a high-order workload estimation model, and (3) a communication cost model. First, we design an RL-based work donation algorithm. Our algorithm monitors workloads of processes and creates RL agents to donate data blocks and particles from high-workload processes to low-workload processes to minimize program execution time. The agents learn the donation strategy on the fly based on reward and cost functions designed to consider processes' workload changes and data transfer costs of donation actions. Second, we propose a workload estimation model, helping RL agents estimate the workload distribution of processes in future computations. Third, we design a communication cost model that considers both block and particle data exchange costs, helping RL agents make effective decisions with minimized communication costs. We demonstrate that our algorithm adapts to different flow behaviors in large-scale fluid dynamics, ocean, and weather simulation data. Our algorithm improves parallel particle tracing performance in terms of parallel efficiency, load balance, and costs of I/O and communication for evaluations with up to 16,384 processors.",
"title": "Reinforcement Learning for Load-balanced Parallel Particle Tracing",
"normalizedTitle": "Reinforcement Learning for Load-balanced Parallel Particle Tracing",
"fno": "09706326",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Costs",
"Heuristic Algorithms",
"Estimation",
"Load Modeling",
"Data Models",
"Computational Modeling",
"Adaptation Models",
"Distributed And Parallel Particle Tracing",
"Dynamic Load Balancing",
"Reinforcement Learning"
],
"authors": [
{
"givenName": "Jiayi",
"surname": "Xu",
"fullName": "Jiayi Xu",
"affiliation": "Computer Science and Engineering, The Ohio State University, Columbus, Ohio, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanqi",
"surname": "Guo",
"fullName": "Hanqi Guo",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Argonne, Illinois, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, Ohio, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mukund",
"surname": "Raj",
"fullName": "Mukund Raj",
"affiliation": "Stanley Center for Psychiatric Research, Broad Institute of Harvard and MIT, Cambridge, Massachusetts, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Skylar Wolfgang",
"surname": "Wurster",
"fullName": "Skylar Wolfgang Wurster",
"affiliation": "Computer Science and Engineering, The Ohio State University, Columbus, Ohio, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tom",
"surname": "Peterka",
"fullName": "Tom Peterka",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Argonne, Illinois, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2018/1424/0/142401a086",
"title": "Dynamic Data Repartitioning for Load-Balanced Parallel Particle Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2018/142401a086/12OmNANBZoz",
"parentPublication": {
"id": "proceedings/pacificvis/2018/1424/0",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2014/2729/0/2729a500",
"title": "Energy-Efficient Virtual Machines Consolidation in Cloud Data Centers Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2014/2729a500/12OmNrAdsuT",
"parentPublication": {
"id": "proceedings/pdp/2014/2729/0",
"title": "2014 22nd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017633",
"title": "Dynamic Load Balancing Based on Constrained K-D Tree Decomposition for Parallel Particle Tracing",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017633/13rRUwgQpqN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08257928",
"title": "Elastic management of cloud applications using adaptive reinforcement learning",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08257928/17D45W9KVIf",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2022/1332/0/09687277",
"title": "Reinforcement Learning based Load Balancing in a Distributed Heterogeneous Storage System",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2022/09687277/1AtQbPw61nW",
"parentPublication": {
"id": "proceedings/icoin/2022/1332/0",
"title": "2022 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloud/2019/2705/0/270500a329",
"title": "Horizontal and Vertical Scaling of Container-Based Applications Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cloud/2019/270500a329/1cTJ4x4GABG",
"parentPublication": {
"id": "proceedings/cloud/2019/2705/0",
"title": "2019 IEEE 12th International Conference on Cloud Computing (CLOUD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/su/2021/03/08691496",
"title": "Deep Reinforcement Learning for Joint Datacenter and HVAC Load Control in Distributed Mixed-Use Buildings",
"doi": null,
"abstractUrl": "/journal/su/2021/03/08691496/1j4FFN5tvHi",
"parentPublication": {
"id": "trans/su",
"title": "IEEE Transactions on Sustainable Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086288",
"title": "LBVis: Interactive Dynamic Load Balancing Visualization for Parallel Particle Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086288/1kuHlZvEurK",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn-w/2020/7263/0/09151835",
"title": "Blackbox Attacks on Reinforcement Learning Agents Using Approximated Temporal Information",
"doi": null,
"abstractUrl": "/proceedings-article/dsn-w/2020/09151835/1lRm2fSRYMo",
"parentPublication": {
"id": "proceedings/dsn-w/2020/7263/0",
"title": "2020 50th Annual IEEE/IFIP International Conference on Dependable Systems and Networks Workshops (DSN-W)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09645253",
"title": "Temporal-Spatial Causal Interpretations for Vision-Based Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09645253/1zc6yjofBSM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09706291",
"articleId": "1AO2jecLFgQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09707648",
"articleId": "1APlBguxdSw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1APlBMV42bu",
"name": "ttg555501-09706326s1-supp1-3148745.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09706326s1-supp1-3148745.mp4",
"extension": "mp4",
"size": "13.8 MB",
"__typename": "WebExtraType"
},
{
"id": "1APlBtLHX5C",
"name": "ttg555501-09706326s1-supp2-3148745.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09706326s1-supp2-3148745.mp4",
"extension": "mp4",
"size": "6.19 MB",
"__typename": "WebExtraType"
},
{
"id": "1APlBAX4AaQ",
"name": "ttg555501-09706326s1-supp3-3148745.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09706326s1-supp3-3148745.mp4",
"extension": "mp4",
"size": "7.31 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1APlBguxdSw",
"doi": "10.1109/TVCG.2022.3149466",
"abstract": "We propose a new incompressible NavierStokes solver based on the impulse gauge transformation. The mathematical model of our approach draws from the impulsevelocity formulation of NavierStokes equations, which evolves the fluid impulse as an auxiliary variable of the system that can be projected to obtain the incompressible flow velocities at the end of each time step. We solve the impulse-form equations numerically on a Cartesian grid. At the heart of our simulation algorithm is a novel model to treat the impulse stretching and a harmonic boundary treatment to incorporate the surface tension effects accurately. We also build an impulse PIC/FLIP solver to support free-surface fluid simulation. Our impulse solver can naturally produce rich vortical flow details without artificial enhancements. We showcase this feature by using our solver to facilitate a wide range of fluid simulation tasks including smoke, liquid, and surface-tension flow. In addition, we discuss a convenient mechanism in our framework to control the scale and strength of the fluids turbulent effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new incompressible NavierStokes solver based on the impulse gauge transformation. The mathematical model of our approach draws from the impulsevelocity formulation of NavierStokes equations, which evolves the fluid impulse as an auxiliary variable of the system that can be projected to obtain the incompressible flow velocities at the end of each time step. We solve the impulse-form equations numerically on a Cartesian grid. At the heart of our simulation algorithm is a novel model to treat the impulse stretching and a harmonic boundary treatment to incorporate the surface tension effects accurately. We also build an impulse PIC/FLIP solver to support free-surface fluid simulation. Our impulse solver can naturally produce rich vortical flow details without artificial enhancements. We showcase this feature by using our solver to facilitate a wide range of fluid simulation tasks including smoke, liquid, and surface-tension flow. In addition, we discuss a convenient mechanism in our framework to control the scale and strength of the fluids turbulent effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new incompressible NavierStokes solver based on the impulse gauge transformation. The mathematical model of our approach draws from the impulsevelocity formulation of NavierStokes equations, which evolves the fluid impulse as an auxiliary variable of the system that can be projected to obtain the incompressible flow velocities at the end of each time step. We solve the impulse-form equations numerically on a Cartesian grid. At the heart of our simulation algorithm is a novel model to treat the impulse stretching and a harmonic boundary treatment to incorporate the surface tension effects accurately. We also build an impulse PIC/FLIP solver to support free-surface fluid simulation. Our impulse solver can naturally produce rich vortical flow details without artificial enhancements. We showcase this feature by using our solver to facilitate a wide range of fluid simulation tasks including smoke, liquid, and surface-tension flow. In addition, we discuss a convenient mechanism in our framework to control the scale and strength of the fluids turbulent effects.",
"title": "Impulse Fluid Simulation",
"normalizedTitle": "Impulse Fluid Simulation",
"fno": "09707648",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Mathematical Models",
"Numerical Models",
"Computational Modeling",
"Animation",
"Surface Tension",
"Computer Graphics",
"Harmonic Analysis",
"Fluid Simulation",
"Vortical Structures",
"Gauge Methods",
"Physics Based Animation"
],
"authors": [
{
"givenName": "Fan",
"surname": "Feng",
"fullName": "Fan Feng",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, 03755-3529 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinyuan",
"surname": "Liu",
"fullName": "Jinyuan Liu",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shiying",
"surname": "Xiong",
"fullName": "Shiying Xiong",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuqi",
"surname": "Yang",
"fullName": "Shuqi Yang",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yaorui",
"surname": "Zhang",
"fullName": "Yaorui Zhang",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bo",
"surname": "Zhu",
"fullName": "Bo Zhu",
"affiliation": "Computer Science, Dartmouth College, 3728 Hanover, New Hampshire, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2015/7673/0/7673a295",
"title": "SPH-based Fluid Simulation with a New Surface Tension Formulation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a295/12OmNAS9zo4",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a038",
"title": "Particle Importance Based Fluid Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a038/12OmNAolHa9",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450421",
"title": "A New Surface Tension Formulation for SPH",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450421/12OmNButq4h",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2004/2112/0/21120266",
"title": "Haptic Simulation of Linear Elastic Media with Fluid Pockets",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2004/21120266/12OmNvjyxRX",
"parentPublication": {
"id": "proceedings/haptics/2004/2112/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acomp/2015/8234/0/8234a112",
"title": "A New Hllem-Type Riemann Solver for Compressible Multi-phase Flows with Surface Tension",
"doi": null,
"abstractUrl": "/proceedings-article/acomp/2015/8234a112/12OmNzWx02Y",
"parentPublication": {
"id": "proceedings/acomp/2015/8234/0",
"title": "2015 International Conference on Advanced Computing and Applications (ACOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/09/08419266",
"title": "Continuous-Scale Kinetic Fluid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2019/09/08419266/13rRUwgyOjr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010070",
"title": "Fluid Simulation with Articulated Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010070/13rRUxDqS8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09907886",
"title": "A Current Loop Model for the Fast Simulation of Ferrofluids",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09907886/1HbasJ1nQTC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09366379",
"title": "Incompressibility Enforcement for Multiple-Fluid SPH Using Deformation Gradient",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09366379/1rCc9A58Mec",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09355005",
"title": "GPU Optimization for High-Quality Kinetic Fluid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09355005/1rgClgs3uj6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09706291",
"articleId": "1AO2jecLFgQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09714170",
"articleId": "1B0Y0ltSIfK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1ASFlLOe75u",
"name": "ttg555501-09707648s1-tvcg-3149466-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09707648s1-tvcg-3149466-mm.zip",
"extension": "zip",
"size": "242 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AIIbJW1goU",
"doi": "10.1109/TVCG.2022.3148107",
"abstract": "Graph Neural Networks (GNNs) aim to extend deep learning techniques to graph data and have achieved significant progress in graph analysis tasks (e.g., node classification) in recent years. However, similar to other deep neural networks like Convolutional Neural Networks (CNNs) and Recurrent Neural Networks (RNNs), GNNs behave like a black box with their details hidden from model developers and users. It is therefore difficult to diagnose possible errors of GNNs. Despite many visual analytics studies being done on CNNs and RNNs, little research has addressed the challenges for GNNs. This paper fills the research gap with an interactive visual analysis tool, GNNLens, to assist model developers and users in understanding and analyzing GNNs. Specifically, Parallel Sets View and Projection View enable users to quickly identify and validate error patterns in the set of wrong predictions; Graph View and Feature Matrix View offer a detailed analysis of individual nodes to assist users in forming hypotheses about the error patterns. Since GNNs jointly model the graph structure and the node features, we reveal the relative influences of the two types of information by comparing the predictions of three models: GNN, Multi-Layer Perceptron (MLP), and GNN Without Using Features (GNNWUF). Two case studies and interviews with domain experts demonstrate the effectiveness of GNNLens in facilitating the understanding of GNN models and their errors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graph Neural Networks (GNNs) aim to extend deep learning techniques to graph data and have achieved significant progress in graph analysis tasks (e.g., node classification) in recent years. However, similar to other deep neural networks like Convolutional Neural Networks (CNNs) and Recurrent Neural Networks (RNNs), GNNs behave like a black box with their details hidden from model developers and users. It is therefore difficult to diagnose possible errors of GNNs. Despite many visual analytics studies being done on CNNs and RNNs, little research has addressed the challenges for GNNs. This paper fills the research gap with an interactive visual analysis tool, GNNLens, to assist model developers and users in understanding and analyzing GNNs. Specifically, Parallel Sets View and Projection View enable users to quickly identify and validate error patterns in the set of wrong predictions; Graph View and Feature Matrix View offer a detailed analysis of individual nodes to assist users in forming hypotheses about the error patterns. Since GNNs jointly model the graph structure and the node features, we reveal the relative influences of the two types of information by comparing the predictions of three models: GNN, Multi-Layer Perceptron (MLP), and GNN Without Using Features (GNNWUF). Two case studies and interviews with domain experts demonstrate the effectiveness of GNNLens in facilitating the understanding of GNN models and their errors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graph Neural Networks (GNNs) aim to extend deep learning techniques to graph data and have achieved significant progress in graph analysis tasks (e.g., node classification) in recent years. However, similar to other deep neural networks like Convolutional Neural Networks (CNNs) and Recurrent Neural Networks (RNNs), GNNs behave like a black box with their details hidden from model developers and users. It is therefore difficult to diagnose possible errors of GNNs. Despite many visual analytics studies being done on CNNs and RNNs, little research has addressed the challenges for GNNs. This paper fills the research gap with an interactive visual analysis tool, GNNLens, to assist model developers and users in understanding and analyzing GNNs. Specifically, Parallel Sets View and Projection View enable users to quickly identify and validate error patterns in the set of wrong predictions; Graph View and Feature Matrix View offer a detailed analysis of individual nodes to assist users in forming hypotheses about the error patterns. Since GNNs jointly model the graph structure and the node features, we reveal the relative influences of the two types of information by comparing the predictions of three models: GNN, Multi-Layer Perceptron (MLP), and GNN Without Using Features (GNNWUF). Two case studies and interviews with domain experts demonstrate the effectiveness of GNNLens in facilitating the understanding of GNN models and their errors.",
"title": "GNNLens: A Visual Analytics Approach for Prediction Error Diagnosis of Graph Neural Networks",
"normalizedTitle": "GNNLens: A Visual Analytics Approach for Prediction Error Diagnosis of Graph Neural Networks",
"fno": "09705076",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Analytical Models",
"Deep Learning",
"Predictive Models",
"Visual Analytics",
"Data Models",
"Convolutional Neural Networks",
"Task Analysis",
"Graph Neural Networks",
"Error Diagnosis",
"Visualization"
],
"authors": [
{
"givenName": "Zhihua",
"surname": "Jin",
"fullName": "Zhihua Jin",
"affiliation": "Computer Science and Engineering, Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong, 999077 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "School of Information Systems, Singapore Management University, 54756 Singapore, Singapore, Singapore, 178902 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qianwen",
"surname": "Wang",
"fullName": "Qianwen Wang",
"affiliation": "Bioinfomatics, Harvard University, 1812 Cambridge, Massachusetts, United States, 02138 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yao",
"surname": "Ming",
"fullName": "Yao Ming",
"affiliation": "Computer Science and Engineering, Hong Kong University of Science and Technology, Hong Kong, Hong Kong, Hong Kong, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tengfei",
"surname": "Ma",
"fullName": "Tengfei Ma",
"affiliation": "AI, IBM, 3261 Armonk, New York, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "The Department of Computer Science and Engineering, he Hong Kong University of Science and Technology, Hong Kong, HK, Hong Kong, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2021/3734/0/373400a249",
"title": "Edge-Level Explanations for Graph Neural Networks by Extending Explainability Methods for Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a249/1A3j8OOLHa0",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipccc/2021/4331/0/09679362",
"title": "Accelerate Graph Neural Network Training by Reusing Batch Data on GPUs",
"doi": null,
"abstractUrl": "/proceedings-article/ipccc/2021/09679362/1AjTn7cgpgs",
"parentPublication": {
"id": "proceedings/ipccc/2021/4331/0",
"title": "2021 IEEE International Performance, Computing, and Communications Conference (IPCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tps-isa/2021/1623/0/162300a011",
"title": "Membership Inference Attack on Graph Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/tps-isa/2021/162300a011/1CzeyRu16tG",
"parentPublication": {
"id": "proceedings/tps-isa/2021/1623/0",
"title": "2021 Third IEEE International Conference on Trust, Privacy and Security in Intelligent Systems and Applications (TPS-ISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2022/2027/0/202700a429",
"title": "ReGNN: A Redundancy-Eliminated Graph Neural Networks Accelerator",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2022/202700a429/1Ds0aHT54Y0",
"parentPublication": {
"id": "proceedings/hpca/2022/2027/0",
"title": "2022 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995685",
"title": "Glycan Immunogenicity Prediction with Efficient Automatic Graph Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995685/1JC1N5gNMUo",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispass/2021/8643/0/864300a118",
"title": "Performance Analysis of Graph Neural Network Frameworks",
"doi": null,
"abstractUrl": "/proceedings-article/ispass/2021/864300a118/1taFhPahQrK",
"parentPublication": {
"id": "proceedings/ispass/2021/8643/0",
"title": "2021 IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09420254",
"title": "Visual Analytics for RNN-Based Deep Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09420254/1tdUMGe1DAk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413200",
"title": "Temporal Collaborative Filtering with Graph Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413200/1tmiDHwRHeU",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/03/09540311",
"title": "Eigen-GNN: A Graph Structure Preserving Plug-in for GNNs",
"doi": null,
"abstractUrl": "/journal/tk/2023/03/09540311/1wWCbkQuLrG",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552902",
"title": "Interactive Visual Pattern Search on Graph Data via Graph Representation Learning",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552902/1xic4qsF8zK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09699035",
"articleId": "1ADJfMYBSCs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09705143",
"articleId": "1AIIcwNiqxq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1AO2jBC6HkI",
"name": "ttg555501-09705076s1-supp2-3148107.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09705076s1-supp2-3148107.pdf",
"extension": "pdf",
"size": "4.29 MB",
"__typename": "WebExtraType"
},
{
"id": "1AO2jQP49r2",
"name": "ttg555501-09705076s1-supp1-3148107.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09705076s1-supp1-3148107.mp4",
"extension": "mp4",
"size": "36.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AIIcwNiqxq",
"doi": "10.1109/TVCG.2022.3148245",
"abstract": "Due to inevitable noises introduced during scanning and quantization, 3D reconstruction via RGB-D sensors suffers from errors both in geometry and texture, leading to artifacts such as camera drifting, mesh distortion, texture ghosting, and blurriness. Given an imperfect reconstructed 3D model, most previous methods have focused on refining either geometry, texture, or camera pose. Consequently, different optimization schemes and objectives for optimizing each component have been used in previous joint optimization methods, forming a complicated system. In this paper, we propose a novel optimization approach based on differentiable rendering, which integrates the optimization of camera pose, geometry, and texture into a unified framework by enforcing consistency between the rendered results and the corresponding RGB-D inputs. Based on the unified framework, we introduce a joint optimization approach to fully exploit the inter-relationships among the three objective components, and describe an adaptive interleaving strategy to improve optimization stability and efficiency. Using differentiable rendering, an image-level adversarial loss is applied to further improve the 3D model, making it more photorealistic. Experiments on synthetic and real data using quantitative and qualitative evaluation demonstrated the superiority of our approach in recovering both fine-scale geometry and high-fidelity texture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Due to inevitable noises introduced during scanning and quantization, 3D reconstruction via RGB-D sensors suffers from errors both in geometry and texture, leading to artifacts such as camera drifting, mesh distortion, texture ghosting, and blurriness. Given an imperfect reconstructed 3D model, most previous methods have focused on refining either geometry, texture, or camera pose. Consequently, different optimization schemes and objectives for optimizing each component have been used in previous joint optimization methods, forming a complicated system. In this paper, we propose a novel optimization approach based on differentiable rendering, which integrates the optimization of camera pose, geometry, and texture into a unified framework by enforcing consistency between the rendered results and the corresponding RGB-D inputs. Based on the unified framework, we introduce a joint optimization approach to fully exploit the inter-relationships among the three objective components, and describe an adaptive interleaving strategy to improve optimization stability and efficiency. Using differentiable rendering, an image-level adversarial loss is applied to further improve the 3D model, making it more photorealistic. Experiments on synthetic and real data using quantitative and qualitative evaluation demonstrated the superiority of our approach in recovering both fine-scale geometry and high-fidelity texture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Due to inevitable noises introduced during scanning and quantization, 3D reconstruction via RGB-D sensors suffers from errors both in geometry and texture, leading to artifacts such as camera drifting, mesh distortion, texture ghosting, and blurriness. Given an imperfect reconstructed 3D model, most previous methods have focused on refining either geometry, texture, or camera pose. Consequently, different optimization schemes and objectives for optimizing each component have been used in previous joint optimization methods, forming a complicated system. In this paper, we propose a novel optimization approach based on differentiable rendering, which integrates the optimization of camera pose, geometry, and texture into a unified framework by enforcing consistency between the rendered results and the corresponding RGB-D inputs. Based on the unified framework, we introduce a joint optimization approach to fully exploit the inter-relationships among the three objective components, and describe an adaptive interleaving strategy to improve optimization stability and efficiency. Using differentiable rendering, an image-level adversarial loss is applied to further improve the 3D model, making it more photorealistic. Experiments on synthetic and real data using quantitative and qualitative evaluation demonstrated the superiority of our approach in recovering both fine-scale geometry and high-fidelity texture.",
"title": "Adaptive Joint Optimization for 3D Reconstruction with Differentiable Rendering",
"normalizedTitle": "Adaptive Joint Optimization for 3D Reconstruction with Differentiable Rendering",
"fno": "09705143",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Optimization",
"Geometry",
"Three Dimensional Displays",
"Solid Modeling",
"Image Reconstruction",
"Rendering Computer Graphics",
"Texture Optimization",
"Geometry Refinement",
"3 D Reconstruction",
"Adaptive Interleaving Strategy",
"Differentiable Rendering"
],
"authors": [
{
"givenName": "Jingbo",
"surname": "Zhang",
"fullName": "Jingbo Zhang",
"affiliation": "Computer Science, City University of Hong Kong, 53025 Hong Kong, Kowloon, Hong Kong, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ziyu",
"surname": "Wan",
"fullName": "Ziyu Wan",
"affiliation": "Computer Science, City University of Hong Kong, 53025 Kowloon, -, Hong Kong, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Liao",
"fullName": "Jing Liao",
"affiliation": "Computer Science, City University of Hong Kong, 53025 Kowloon, Kowloon, Hong Kong, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-02-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032d133",
"title": "Intrinsic3D: High-Quality 3D Reconstruction by Joint Appearance and Geometry Optimization with Spatially-Varying Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d133/12OmNC4eSyL",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459378",
"title": "Superresolution texture maps for multiview reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459378/12OmNxuFBnH",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i625",
"title": "Differentiable Stereopsis: Meshes from multiple views using differentiable rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i625/1H0NABVhMdO",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a001",
"title": "Keyframe-Based Texture Mapping for RGBD Human Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a001/1a3x6hGWsso",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c172",
"title": "3D Scene Reconstruction With Multi-Layer Depth and Epipolar Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c172/1hVlfLRJFS0",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d501",
"title": "Differentiable Volumetric Rendering: Learning Implicit 3D Representations Without 3D Supervision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d501/1m3nwXQXEAw",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f949",
"title": "Joint Texture and Geometry Optimization for RGB-D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f949/1m3ogA88vw4",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b186",
"title": "Shape from Tracing: Towards Reconstructing 3D Object Geometry and SVBRDF Material from Images via Differentiable Path Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b186/1qyxkY66O08",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09705076",
"articleId": "1AIIbJW1goU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09706326",
"articleId": "1AO2j4ICNLa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ADJfMYBSCs",
"doi": "10.1109/TVCG.2022.3148007",
"abstract": "Utilizing Visualization-oriented Natural Language Interfaces (V-NLI) as a complementary input modality to direct manipulation for visual analytics can provide an engaging user experience. It enables users to focus on their tasks rather than having to worry about how to operate visualization tools on the interface. In the past two decades, leveraging advanced natural language processing technologies, numerous V-NLI systems have been developed in academic research and commercial software, especially in recent years. In this article, we conduct a comprehensive review of the existing V-NLIs. In order to classify each paper, we develop categorical dimensions based on a classic information visualization pipeline with the extension of a V-NLI layer. The following seven stages are used: query interpretation, data transformation, visual mapping, view transformation, human interaction, dialogue management, and presentation. Finally, we also shed light on several promising directions for future work in the V-NLI community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Utilizing Visualization-oriented Natural Language Interfaces (V-NLI) as a complementary input modality to direct manipulation for visual analytics can provide an engaging user experience. It enables users to focus on their tasks rather than having to worry about how to operate visualization tools on the interface. In the past two decades, leveraging advanced natural language processing technologies, numerous V-NLI systems have been developed in academic research and commercial software, especially in recent years. In this article, we conduct a comprehensive review of the existing V-NLIs. In order to classify each paper, we develop categorical dimensions based on a classic information visualization pipeline with the extension of a V-NLI layer. The following seven stages are used: query interpretation, data transformation, visual mapping, view transformation, human interaction, dialogue management, and presentation. Finally, we also shed light on several promising directions for future work in the V-NLI community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Utilizing Visualization-oriented Natural Language Interfaces (V-NLI) as a complementary input modality to direct manipulation for visual analytics can provide an engaging user experience. It enables users to focus on their tasks rather than having to worry about how to operate visualization tools on the interface. In the past two decades, leveraging advanced natural language processing technologies, numerous V-NLI systems have been developed in academic research and commercial software, especially in recent years. In this article, we conduct a comprehensive review of the existing V-NLIs. In order to classify each paper, we develop categorical dimensions based on a classic information visualization pipeline with the extension of a V-NLI layer. The following seven stages are used: query interpretation, data transformation, visual mapping, view transformation, human interaction, dialogue management, and presentation. Finally, we also shed light on several promising directions for future work in the V-NLI community.",
"title": "Towards Natural Language Interfaces for Data Visualization: A Survey",
"normalizedTitle": "Towards Natural Language Interfaces for Data Visualization: A Survey",
"fno": "09699035",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Natural Language Processing",
"Task Analysis",
"Human Computer Interaction",
"Software",
"Data Mining",
"Data Visualization",
"Natural Language Interfaces",
"Survey"
],
"authors": [
{
"givenName": "Leixian",
"surname": "Shen",
"fullName": "Leixian Shen",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, 100084 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enya",
"surname": "Shen",
"fullName": "Enya Shen",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuyu",
"surname": "Luo",
"fullName": "Yuyu Luo",
"affiliation": "CS Department, Tsinghua University, 12442 Beijing, Beijing, China, 100084 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaocong",
"surname": "Yang",
"fullName": "Xiaocong Yang",
"affiliation": "School of economics and management, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xuming",
"surname": "Hu",
"fullName": "Xuming Hu",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiongshuai",
"surname": "Zhang",
"fullName": "Xiongshuai Zhang",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiwei",
"surname": "Tai",
"fullName": "Zhiwei Tai",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianmin",
"surname": "Wang",
"fullName": "Jianmin Wang",
"affiliation": "School of Software, Tsinghua University, 12442 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/ic/2015/06/mic2015060060",
"title": "Natural Interaction with Visualization Systems",
"doi": null,
"abstractUrl": "/magazine/ic/2015/06/mic2015060060/13rRUxCRFSG",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/06/07018997",
"title": "Query2Question: Translating Visualization Interaction into Natural Language",
"doi": null,
"abstractUrl": "/journal/tg/2015/06/07018997/13rRUy0HYRr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2021/2587/0/258700a220",
"title": "Evaluating Natural Language Inference Models: A Metamorphic Testing Approach",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2021/258700a220/1AUp3cpQCIg",
"parentPublication": {
"id": "proceedings/issre/2021/2587/0",
"title": "2021 IEEE 32nd International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09912366",
"title": "Towards Natural Language-Based Visualization Authoring",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09912366/1HeiWkRN3tC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a006",
"title": "Facilitating Conversational Interaction in Natural Language Interfaces for Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a006/1J6hcTVtKNy",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a142",
"title": "Natural Language Interface for Data Visualization with Deep Learning Based Language Models",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a142/1KaH35KSrrW",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10026499",
"title": "XNLI: Explaining and Diagnosing NLI-based Visual Data Analysis",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10026499/1KkXscJg6vm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/04/09118800",
"title": "How to Ask What to Say?: Strategies for Evaluating Natural Language Interfaces for Data Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2020/04/09118800/1kHUNLgZhSM",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222342",
"title": "NL4DV: A Toolkit for Generating Analytic Specifications for Data Visualization from Natural Language Queries",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222342/1nTqOo5NR3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09617561",
"title": "Natural Language to Visualization by Neural Machine Translation",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09617561/1yA76vDzhhC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09695246",
"articleId": "1AvqJVgygfe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09705076",
"articleId": "1AIIbJW1goU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AvqJqAJOKY",
"doi": "10.1109/TVCG.2022.3146329",
"abstract": "We present Roslingifier, a data-driven storytelling method for animated scatterplots. Like its namesake, Hans Rosling (1948--2017), a professor of public health and a spellbinding public speaker, Roslingifier turns a sequence of entities changing over time---such as countries and continents with their demographic data---into an engaging narrative telling the story of the data. This data-driven storytelling method with an in-person presenter is a new genre of storytelling technique and has never been studied before. In this paper, we aim to define a design space for this new genre---data presentation---and provide a semi-automated authoring tool for helping presenters create quality presentations. From an in-depth analysis of video clips of presentations using interactive visualizations, we derive three specific techniques to achieve this: natural language narratives, visual effects that highlight events, and temporal branching that changes playback time of the animation. Our implementation of the Roslingifier method is capable of identifying and clustering significant movements, automatically generating visual highlighting and a narrative for playback, and enabling the user to customize. From two user studies, we show that Roslingifier allows users to effectively create engaging data stories and the system features help both presenters and viewers find diverse insights.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Roslingifier, a data-driven storytelling method for animated scatterplots. Like its namesake, Hans Rosling (1948--2017), a professor of public health and a spellbinding public speaker, Roslingifier turns a sequence of entities changing over time---such as countries and continents with their demographic data---into an engaging narrative telling the story of the data. This data-driven storytelling method with an in-person presenter is a new genre of storytelling technique and has never been studied before. In this paper, we aim to define a design space for this new genre---data presentation---and provide a semi-automated authoring tool for helping presenters create quality presentations. From an in-depth analysis of video clips of presentations using interactive visualizations, we derive three specific techniques to achieve this: natural language narratives, visual effects that highlight events, and temporal branching that changes playback time of the animation. Our implementation of the Roslingifier method is capable of identifying and clustering significant movements, automatically generating visual highlighting and a narrative for playback, and enabling the user to customize. From two user studies, we show that Roslingifier allows users to effectively create engaging data stories and the system features help both presenters and viewers find diverse insights.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Roslingifier, a data-driven storytelling method for animated scatterplots. Like its namesake, Hans Rosling (1948--2017), a professor of public health and a spellbinding public speaker, Roslingifier turns a sequence of entities changing over time---such as countries and continents with their demographic data---into an engaging narrative telling the story of the data. This data-driven storytelling method with an in-person presenter is a new genre of storytelling technique and has never been studied before. In this paper, we aim to define a design space for this new genre---data presentation---and provide a semi-automated authoring tool for helping presenters create quality presentations. From an in-depth analysis of video clips of presentations using interactive visualizations, we derive three specific techniques to achieve this: natural language narratives, visual effects that highlight events, and temporal branching that changes playback time of the animation. Our implementation of the Roslingifier method is capable of identifying and clustering significant movements, automatically generating visual highlighting and a narrative for playback, and enabling the user to customize. From two user studies, we show that Roslingifier allows users to effectively create engaging data stories and the system features help both presenters and viewers find diverse insights.",
"title": "Roslingifier: Semi-Automated Storytelling for Animated Scatterplots",
"normalizedTitle": "Roslingifier: Semi-Automated Storytelling for Animated Scatterplots",
"fno": "09695173",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Annotations",
"Visual Effects",
"Streaming Media",
"Organizations",
"Natural Languages",
"Data Driven Storytelling",
"Narrative Visualization",
"Hans Rosling",
"Gapminder",
"Trendalyzer"
],
"authors": [
{
"givenName": "Minjeong",
"surname": "Shin",
"fullName": "Minjeong Shin",
"affiliation": "College of Engineering and Computer Science, Australian National University, 2219 Canberra, Australian Capital Territory, Australia, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joohee",
"surname": "Kim",
"fullName": "Joohee Kim",
"affiliation": "School of Computer Science and Engineering, Ulsan National Institute of Science and Technology, 131639 Ulsan, Ulsan, Korea (the Republic of), (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yunha",
"surname": "Han",
"fullName": "Yunha Han",
"affiliation": "School of Computer Science and Engineering, Ulsan National Institute of Science and Technology, 131639 Ulsan, Ulsan, Korea (the Republic of), (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lexing",
"surname": "Xie",
"fullName": "Lexing Xie",
"affiliation": "College of Engineering and Computer Science, Australian National University, 2219 Canberra, Australian Capital Territory, Australia, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mitchell",
"surname": "Whitelaw",
"fullName": "Mitchell Whitelaw",
"affiliation": "School of Art and Design, Australian National University, 2219 Canberra, Australian Capital Territory, Australia, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bum Chul",
"surname": "Kwon",
"fullName": "Bum Chul Kwon",
"affiliation": "Health Analytics Research Group, IBM Research, 74286 Cambridge, Massachusetts, United States, 02142 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sungahn",
"surname": "Ko",
"fullName": "Sungahn Ko",
"affiliation": "School of Computer Science and Engineering, Ulsan National Institute of Science and Technology, 131639 Ulsan, Ulsan, Korea (the Republic of), (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "College of Information Studies, University of Maryland at College Park, 1068 College Park, Maryland, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2017/5738/0/08031599",
"title": "ChartAccent: Annotation for data-driven storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031599/12OmNxEjY7F",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2011/4337/0/4337a270",
"title": "Determining Writing Genre: Towards a Rubric-based Approach to Automated Essay Grading",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2011/4337a270/12OmNxwENJ7",
"parentPublication": {
"id": "proceedings/aina/2011/4337/0",
"title": "2011 IEEE International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/05/mco2013050044",
"title": "Storytelling: The Next Step for Visualization",
"doi": null,
"abstractUrl": "/magazine/co/2013/05/mco2013050044/13rRUx0xQ3b",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/09/07581076",
"title": "Timelines Revisited: A Design Space and Considerations for Expressive Storytelling",
"doi": null,
"abstractUrl": "/journal/tg/2017/09/07581076/13rRUxYrbUN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/01/mcg2012010012",
"title": "Scientific Storytelling Using Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2012/01/mcg2012010012/13rRUyuegjn",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/5555/01/10107759",
"title": "The Stories We Tell About Data: Surveying Data-Driven Storytelling Using Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/5555/01/10107759/1MDGmTM8oOA",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2019/5604/0/560400a102",
"title": "A Live Storytelling Virtual Reality System with Programmable Cartoon-Style Emotion Embodiment",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2019/560400a102/1grOlrH5hdK",
"parentPublication": {
"id": "proceedings/aivr/2019/5604/0",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/anivae/2019/3229/0/09050929",
"title": "Everything must change? Challenges for animated storytelling in VR",
"doi": null,
"abstractUrl": "/proceedings-article/anivae/2019/09050929/1iHTaxDewDK",
"parentPublication": {
"id": "proceedings/anivae/2019/3229/0",
"title": "2019 IEEE 2nd Workshop on Animation in Virtual and Augmented Environments (ANIVAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a112",
"title": "Rethinking of Artificial Intelligence Storytelling of Digital Media",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a112/1wutIsprwIw",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09547737",
"title": "ChartStory: Automated Partitioning, Layout, and Captioning of Charts into Comic-Style Narratives",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09547737/1x9TL0bvSlq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09693178",
"articleId": "1As7aEVtgNW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09695348",
"articleId": "1AvqJHyHeO4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Ax5JGSEIzS",
"name": "ttg555501-09695173s1-supp1-3146329.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09695173s1-supp1-3146329.pdf",
"extension": "pdf",
"size": "3.76 MB",
"__typename": "WebExtraType"
},
{
"id": "1Ax5JrHY3w4",
"name": "ttg555501-09695173s1-supp2-3146329.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09695173s1-supp2-3146329.mp4",
"extension": "mp4",
"size": "93.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AvqJVgygfe",
"doi": "10.1109/TVCG.2022.3146806",
"abstract": "Businesses in high-risk environments have been reluctant to adopt modern machine learning approaches due to their complex and uninterpretable nature. Most current solutions provide local, instance-level explanations, but this is insufficient for understanding the model as a whole. In this work, we show that strategy clusters (i.e., groups of data instances that are treated distinctly by the model) can be used to understand the global behavior of a complex ML model. To support effective exploration and understanding of these clusters, we introduce StrategyAtlas, a system designed to analyze and explain model strategies. Furthermore, it supports multiple ways to utilize these strategies for simplifying and improving the reference model. In collaboration with a large insurance company, we present a use case in automatic insurance acceptance, and show how professional data scientists were enabled to understand a complex model and improve the production model based on these insights.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Businesses in high-risk environments have been reluctant to adopt modern machine learning approaches due to their complex and uninterpretable nature. Most current solutions provide local, instance-level explanations, but this is insufficient for understanding the model as a whole. In this work, we show that strategy clusters (i.e., groups of data instances that are treated distinctly by the model) can be used to understand the global behavior of a complex ML model. To support effective exploration and understanding of these clusters, we introduce StrategyAtlas, a system designed to analyze and explain model strategies. Furthermore, it supports multiple ways to utilize these strategies for simplifying and improving the reference model. In collaboration with a large insurance company, we present a use case in automatic insurance acceptance, and show how professional data scientists were enabled to understand a complex model and improve the production model based on these insights.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Businesses in high-risk environments have been reluctant to adopt modern machine learning approaches due to their complex and uninterpretable nature. Most current solutions provide local, instance-level explanations, but this is insufficient for understanding the model as a whole. In this work, we show that strategy clusters (i.e., groups of data instances that are treated distinctly by the model) can be used to understand the global behavior of a complex ML model. To support effective exploration and understanding of these clusters, we introduce StrategyAtlas, a system designed to analyze and explain model strategies. Furthermore, it supports multiple ways to utilize these strategies for simplifying and improving the reference model. In collaboration with a large insurance company, we present a use case in automatic insurance acceptance, and show how professional data scientists were enabled to understand a complex model and improve the production model based on these insights.",
"title": "StrategyAtlas: Strategy Analysis for Machine Learning Interpretability",
"normalizedTitle": "StrategyAtlas: Strategy Analysis for Machine Learning Interpretability",
"fno": "09695246",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Models",
"Analytical Models",
"Machine Learning",
"Predictive Models",
"Computational Modeling",
"Insurance",
"Data Visualization",
"Visual Analytics",
"Machine Learning",
"Explainable AI"
],
"authors": [
{
"givenName": "Dennis",
"surname": "Collaris",
"fullName": "Dennis Collaris",
"affiliation": "Mathematics and Computer Science, Eindhoven University of Technology, 3169 Eindhoven, Noord-Brabant, Netherlands, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jarke",
"surname": "Van Wijk",
"fullName": "Jarke Van Wijk",
"affiliation": "Mathematics and Computer Science, Eindhoven University of Technology, 3169 Eindhoven, Noord-Brabant, Netherlands, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2013/5016/0/5016b069",
"title": "Research on Insurance Strategy Simulation Model of Engineering Project",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016b069/12OmNqFJhSW",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bife/2013/4777/0/4777a382",
"title": "Theoretical Mechanism and Empirical Analysis about the Impact on Insurance Intervene SME Financing",
"doi": null,
"abstractUrl": "/proceedings-article/bife/2013/4777a382/12OmNyuPKZQ",
"parentPublication": {
"id": "proceedings/bife/2013/4777/0",
"title": "2013 Sixth International Conference on Business Intelligence and Financial Engineering (BIFE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258557",
"title": "A study on interpretability of decision of machine learning",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258557/17D45WODasZ",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2018/8431/0/843100a411",
"title": "Analysis of the Solvency Adequacy Ratio for Small and Medium-Sized Non-Life Insurance Companies in China",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2018/843100a411/17D45WYQJ6Q",
"parentPublication": {
"id": "proceedings/icdsba/2018/8431/0",
"title": "2018 2nd International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceitsa/2021/1300/0/130000a478",
"title": "Machine Learning in Insurance Underwriting Context",
"doi": null,
"abstractUrl": "/proceedings-article/iceitsa/2021/130000a478/1B2HqYhnS9y",
"parentPublication": {
"id": "proceedings/iceitsa/2021/1300/0",
"title": "2021 International Conference on Electronic Information Technology and Smart Agriculture (ICEITSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800a725",
"title": "Agricultural insurance demand forecasting model based on support vector machine",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800a725/1Byem6zXua4",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2021/0679/0/067900a475",
"title": "Interpretability Analysis of Academic Achievement Prediction Based on Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2021/067900a475/1CATBZ3QJeU",
"parentPublication": {
"id": "proceedings/itme/2021/0679/0",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2019/2286/0/228600a275",
"title": "Interpretability in HealthCare A Comparative Study of Local Machine Learning Interpretability Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2019/228600a275/1cdNZ7L3f0c",
"parentPublication": {
"id": "proceedings/cbms/2019/2286/0",
"title": "2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdeim/2020/0331/0/033100a013",
"title": "Research on the Innovation of China's Pension Insurance Transfer and Succession Model——Based on Big Data Technology",
"doi": null,
"abstractUrl": "/proceedings-article/bdeim/2020/033100a013/1sZ3deBOAfe",
"parentPublication": {
"id": "proceedings/bdeim/2020/0331/0",
"title": "2020 International Conference on Big Data Economy and Information Management (BDEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/caibda/2021/2490/0/249000a009",
"title": "Research on Mobile Caravan Insurance Recommendation Method Based on Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/caibda/2021/249000a009/1xgBlH67oti",
"parentPublication": {
"id": "proceedings/caibda/2021/2490/0",
"title": "2021 International Conference on Artificial Intelligence, Big Data and Algorithms (CAIBDA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09695348",
"articleId": "1AvqJHyHeO4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09699035",
"articleId": "1ADJfMYBSCs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Ax5KE3s8LK",
"name": "ttg555501-09695246s1-supp1-3146806.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09695246s1-supp1-3146806.mp4",
"extension": "mp4",
"size": "27 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AvqJHyHeO4",
"doi": "10.1109/TVCG.2022.3146508",
"abstract": "The development of digitized humanity information provides a new perspective on data-oriented studies of history. Many previous studies have ignored uncertainty in the exploration of historical figures and events, which has limited the capability of researchers to capture complex processes associated with historical phenomena. We propose a visual reasoning system to support visual reasoning of uncertainty associated with spatio-temporal events of historical figures based on data from the China Biographical Database Project. We build a knowledge graph of entities extracted from a historical database to capture uncertainty generated by missing data and error. The proposed system uses an overview of chronology, a map view, and an interpersonal relation matrix to describe and analyse heterogeneous information of events. The system also includes uncertainty visualization to identify uncertain events with missing or imprecise spatio-temporal information. Results from case studies and expert evaluations suggest that the visual reasoning system is able to quantify and reduce uncertainty generated by the data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The development of digitized humanity information provides a new perspective on data-oriented studies of history. Many previous studies have ignored uncertainty in the exploration of historical figures and events, which has limited the capability of researchers to capture complex processes associated with historical phenomena. We propose a visual reasoning system to support visual reasoning of uncertainty associated with spatio-temporal events of historical figures based on data from the China Biographical Database Project. We build a knowledge graph of entities extracted from a historical database to capture uncertainty generated by missing data and error. The proposed system uses an overview of chronology, a map view, and an interpersonal relation matrix to describe and analyse heterogeneous information of events. The system also includes uncertainty visualization to identify uncertain events with missing or imprecise spatio-temporal information. Results from case studies and expert evaluations suggest that the visual reasoning system is able to quantify and reduce uncertainty generated by the data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The development of digitized humanity information provides a new perspective on data-oriented studies of history. Many previous studies have ignored uncertainty in the exploration of historical figures and events, which has limited the capability of researchers to capture complex processes associated with historical phenomena. We propose a visual reasoning system to support visual reasoning of uncertainty associated with spatio-temporal events of historical figures based on data from the China Biographical Database Project. We build a knowledge graph of entities extracted from a historical database to capture uncertainty generated by missing data and error. The proposed system uses an overview of chronology, a map view, and an interpersonal relation matrix to describe and analyse heterogeneous information of events. The system also includes uncertainty visualization to identify uncertain events with missing or imprecise spatio-temporal information. Results from case studies and expert evaluations suggest that the visual reasoning system is able to quantify and reduce uncertainty generated by the data.",
"title": "Visual Reasoning for Uncertainty in Spatio-temporal Events of Historical Figures",
"normalizedTitle": "Visual Reasoning for Uncertainty in Spatio-temporal Events of Historical Figures",
"fno": "09695348",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Uncertainty",
"Visualization",
"Cognition",
"Data Visualization",
"Biographies",
"Task Analysis",
"Data Mining",
"History",
"Uncertainty",
"Spatio Temporal Events",
"Visual Reasoning"
],
"authors": [
{
"givenName": "Wei",
"surname": "Zhang",
"fullName": "Wei Zhang",
"affiliation": "Computer Scienece, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siwei",
"surname": "Tan",
"fullName": "Siwei Tan",
"affiliation": "Computer Science, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siming",
"surname": "Chen",
"fullName": "Siming Chen",
"affiliation": "School of Data Science, Fudan University, 12478 Shanghai, Shanghai, China, 200433 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Linhao",
"surname": "Meng",
"fullName": "Linhao Meng",
"affiliation": "Department of Mathematics and Computer Science, University of Technology Eindhoven Department of Mathematics and Computer Science, 215692 Eindhoven, North Brabant, Netherlands, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tianye",
"surname": "Zhang",
"fullName": "Tianye Zhang",
"affiliation": "the state key lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China, 310058 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rongchen",
"surname": "Zhu",
"fullName": "Rongchen Zhu",
"affiliation": "School of Software Technology, Zhejiang University, 12377 Hangzhou, Zhejiang, China, 310058 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "Zhejiang University, State Key Lab of CAD&CG, Hangzhou, Zhejiang, China, 310058 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wi-iat/2014/4143/2/4143b487",
"title": "Uncertainty Reasoning Based Formal Framework for Big Video Data Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2014/4143b487/12OmNvq5jv8",
"parentPublication": {
"id": "wi-iat/2014/4143/2",
"title": "2014 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent Agent Technologies (IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2012/4771/0/4771a283",
"title": "Visualization for Changes in Relationships between Historical Figures in Chronicles",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a283/12OmNy7yEcR",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017624",
"title": "Imagining Replications: Graphical Prediction & Discrete Visualizations Improve Recall & Estimation of Effect Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017624/13rRUIM2VH5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09772276",
"title": "Practice improves performance of a 2D uncertainty integration task within and across visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09772276/1DgjDz35pfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904442",
"title": "Communicating Uncertainty in Digital Humanities Visualization Research",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904442/1H1gpt871W8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2019/0801/0/08940309",
"title": "Visualization Analysis of Social Networks of Chinese Historical Figures : Take the Study of Literati of Song Dynasty as an Example",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2019/08940309/1gjRQLb3Tkk",
"parentPublication": {
"id": "proceedings/icis/2019/0801/0",
"title": "2019 IEEE/ACIS 18th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086235",
"title": "Uncertainty Treemaps",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086235/1kuHly3FEwU",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cogmi/2020/4144/0/414400a109",
"title": "Causality and Uncertainty of Information for Content Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cogmi/2020/414400a109/1qyxR7c0BEs",
"parentPublication": {
"id": "proceedings/cogmi/2020/4144/0",
"title": "2020 IEEE Second International Conference on Cognitive Machine Intelligence (CogMI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552887",
"title": "Examining Effort in 1D Uncertainty Communication Using Individual Differences in Working Memory and NASA-TLX",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552887/1xibWySUs4U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a156",
"title": "Visually Connecting Historical Figures Through Event Knowledge Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a156/1yXudtjcVMc",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09695173",
"articleId": "1AvqJqAJOKY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09695246",
"articleId": "1AvqJVgygfe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1AH3QmyyGHu",
"name": "ttg555501-09695348s1-supp1-3146508.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09695348s1-supp1-3146508.mp4",
"extension": "mp4",
"size": "33.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1As7aEVtgNW",
"doi": "10.1109/TVCG.2022.3146000",
"abstract": "Coloring line art images based on the colors of reference images is an important stage in animation production, which is time-consuming and tedious. In this paper, we propose a deep architecture to automatically color line art videos with the same color style as the given reference images. Our framework consists of a color transform network and a temporal refinement network based on 3U-net. The color transform network takes the target line art images as well as the line art and color images of the reference images as input, and generates corresponding target color images. To cope with the large differences between each target line art image and the reference color images, we propose a distance attention layer that utilizes non-local similarity matching to determine the region correspondences between the target image and the reference images and transforms the local color information from the references to the target. To ensure global color style consistency, we further incorporate Adaptive Instance Normalization (AdaIN) with the transformation parameters obtained from a multiple-layer AdaIN that describes the global color style of the references, extracted by an embedder network. The temporal refinement network learns spatiotemporal features through 3D convolutions to ensure the temporal color consistency of the results. Our model can achieve even better coloring results by fine-tuning the parameters with only a small number of samples when dealing with an animation of a new style. To evaluate our method, we build a line art coloring dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Coloring line art images based on the colors of reference images is an important stage in animation production, which is time-consuming and tedious. In this paper, we propose a deep architecture to automatically color line art videos with the same color style as the given reference images. Our framework consists of a color transform network and a temporal refinement network based on 3U-net. The color transform network takes the target line art images as well as the line art and color images of the reference images as input, and generates corresponding target color images. To cope with the large differences between each target line art image and the reference color images, we propose a distance attention layer that utilizes non-local similarity matching to determine the region correspondences between the target image and the reference images and transforms the local color information from the references to the target. To ensure global color style consistency, we further incorporate Adaptive Instance Normalization (AdaIN) with the transformation parameters obtained from a multiple-layer AdaIN that describes the global color style of the references, extracted by an embedder network. The temporal refinement network learns spatiotemporal features through 3D convolutions to ensure the temporal color consistency of the results. Our model can achieve even better coloring results by fine-tuning the parameters with only a small number of samples when dealing with an animation of a new style. To evaluate our method, we build a line art coloring dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Coloring line art images based on the colors of reference images is an important stage in animation production, which is time-consuming and tedious. In this paper, we propose a deep architecture to automatically color line art videos with the same color style as the given reference images. Our framework consists of a color transform network and a temporal refinement network based on 3U-net. The color transform network takes the target line art images as well as the line art and color images of the reference images as input, and generates corresponding target color images. To cope with the large differences between each target line art image and the reference color images, we propose a distance attention layer that utilizes non-local similarity matching to determine the region correspondences between the target image and the reference images and transforms the local color information from the references to the target. To ensure global color style consistency, we further incorporate Adaptive Instance Normalization (AdaIN) with the transformation parameters obtained from a multiple-layer AdaIN that describes the global color style of the references, extracted by an embedder network. The temporal refinement network learns spatiotemporal features through 3D convolutions to ensure the temporal color consistency of the results. Our model can achieve even better coloring results by fine-tuning the parameters with only a small number of samples when dealing with an animation of a new style. To evaluate our method, we build a line art coloring dataset.",
"title": "Deep Line Art Video Colorization with a Few References",
"normalizedTitle": "Deep Line Art Video Colorization with a Few References",
"fno": "09693178",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Color Analysis",
"Art",
"Animation",
"Feature Extraction",
"Three Dimensional Displays",
"Transforms",
"Color",
"Line Art Colorization",
"Color Transform",
"Temporal Coherence",
"Few Shot Learning"
],
"authors": [
{
"givenName": "Min",
"surname": "Shi",
"fullName": "Min Shi",
"affiliation": "The School of Control & Computer Engineering, North China Electric Power University, 47840 Beijing, China, 102206 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jia-Qi",
"surname": "Zhang",
"fullName": "Jia-Qi Zhang",
"affiliation": "School of control and computer engineering, North China Electric Power University, 47840 Beijing, Beijing, China, 102206 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shu-Yu",
"surname": "Chen",
"fullName": "Shu-Yu Chen",
"affiliation": "Adcanced Computer Research Center, Institute of Computing Technology Chinese Academy of Sciences, 53035 Beijing, Beijing, China, 100190 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Advanced Computer Research Center, Institute of Computing Technology Chinese Academy of Sciences, 53035 Beijing, Beijing, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Kun",
"surname": "Lai",
"fullName": "Yu-Kun Lai",
"affiliation": "School of Computer Science and Informatics, Cardiff University, 2112 Cardiff, South Glamorgan, United Kingdom of Great Britain and Northern Ireland, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fanglue",
"surname": "Zhang",
"fullName": "Fanglue Zhang",
"affiliation": "ECS, Victoria University of Wellington, 8491 Wellington, Wellington State, New Zealand, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a415",
"title": "Deep Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a415/12OmNBNM93v",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a965",
"title": "Late-resizing: A Simple but Effective Sketch Extraction Strategy for Improving Generalization of Line-art Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a965/1B13HiwSCBy",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j055",
"title": "Tag2Pix: Line Art Colorization Using Text Tag With SECat and Changing Loss",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j055/1hVlpf5xOp2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300d157",
"title": "Artist-Guided Semiautomatic Animation Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300d157/1i5mP2ezeqQ",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09143503",
"title": "Active Colorization for Cartoon Line Drawings",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09143503/1lxmsQXZ36U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f800",
"title": "Reference-Based Sketch Image Colorization Using Augmented-Self Reference and Dense Semantic Correspondence",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f800/1m3nh4A8M8g",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412756",
"title": "Stylized-Colorization for Line Arts",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412756/1tmiCa6wp8c",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d871",
"title": "Line Art Correlation Matching Feature Transfer Network for Automatic Animation Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d871/1uqGPvnkBUI",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900d941",
"title": "Line Art Colorization with Concatenated Spatial Attention",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900d941/1yVzYqkncJy",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j884",
"title": "User-Guided Line Art Flat Filling with Split Filling Mechanism",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j884/1yeJWoXWd7W",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09693232",
"articleId": "1As79CUmeZO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09695173",
"articleId": "1AvqJqAJOKY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1AtLR4Rmt6U",
"name": "ttg555501-09693178s1-supp2-3146000.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09693178s1-supp2-3146000.pdf",
"extension": "pdf",
"size": "27.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1AtLRlHEIEg",
"name": "ttg555501-09693178s1-supp1-3146000.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09693178s1-supp1-3146000.mp4",
"extension": "mp4",
"size": "29.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1As79CUmeZO",
"doi": "10.1109/TVCG.2022.3144975",
"abstract": "Data workers use various scripting languages for data transformation, such as SAS, R, and Python. However, understanding intricate code pieces requires advanced programming skills, which hinders data workers from grasping the idea of data transformation at ease. Program visualization is beneficial for debugging and education and has the potential to illustrate transformations intuitively and interactively. In this paper, we explore visualization design for demonstrating the semantics of code pieces in the context of data transformation. First, to depict individual data transformations, we structure a design space by two primary dimensions, i.e., key parameters to encode and possible visual channels to be mapped. Then, we derive a collection of 23 glyphs that visualize the semantics of transformations. Next, we design a pipeline, named Somnus, that provides an overview of the creation and evolution of data tables using a provenance graph. At the same time, it allows detailed investigation of individual transformations. User feedback on Somnus is positive. Our study participants achieved better accuracy with less time using Somnus, and preferred it over carefully-crafted textual description. Further, we provide two example applications to demonstrate the utility and versatility of Somnus.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Data workers use various scripting languages for data transformation, such as SAS, R, and Python. However, understanding intricate code pieces requires advanced programming skills, which hinders data workers from grasping the idea of data transformation at ease. Program visualization is beneficial for debugging and education and has the potential to illustrate transformations intuitively and interactively. In this paper, we explore visualization design for demonstrating the semantics of code pieces in the context of data transformation. First, to depict individual data transformations, we structure a design space by two primary dimensions, i.e., key parameters to encode and possible visual channels to be mapped. Then, we derive a collection of 23 glyphs that visualize the semantics of transformations. Next, we design a pipeline, named Somnus, that provides an overview of the creation and evolution of data tables using a provenance graph. At the same time, it allows detailed investigation of individual transformations. User feedback on Somnus is positive. Our study participants achieved better accuracy with less time using Somnus, and preferred it over carefully-crafted textual description. Further, we provide two example applications to demonstrate the utility and versatility of Somnus.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Data workers use various scripting languages for data transformation, such as SAS, R, and Python. However, understanding intricate code pieces requires advanced programming skills, which hinders data workers from grasping the idea of data transformation at ease. Program visualization is beneficial for debugging and education and has the potential to illustrate transformations intuitively and interactively. In this paper, we explore visualization design for demonstrating the semantics of code pieces in the context of data transformation. First, to depict individual data transformations, we structure a design space by two primary dimensions, i.e., key parameters to encode and possible visual channels to be mapped. Then, we derive a collection of 23 glyphs that visualize the semantics of transformations. Next, we design a pipeline, named Somnus, that provides an overview of the creation and evolution of data tables using a provenance graph. At the same time, it allows detailed investigation of individual transformations. User feedback on Somnus is positive. Our study participants achieved better accuracy with less time using Somnus, and preferred it over carefully-crafted textual description. Further, we provide two example applications to demonstrate the utility and versatility of Somnus.",
"title": "Visualizing the Scripts of Data Wrangling with SOMNUS",
"normalizedTitle": "Visualizing the Scripts of Data Wrangling with SOMNUS",
"fno": "09693232",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Codes",
"Semantics",
"Visualization",
"Debugging",
"Python",
"Task Analysis",
"Program Understanding",
"Data Transformation",
"Visualization Design"
],
"authors": [
{
"givenName": "Kai",
"surname": "Xiong",
"fullName": "Kai Xiong",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siwei",
"surname": "Fu",
"fullName": "Siwei Fu",
"affiliation": "Artificial Intelligence Research Institute, Zhejiang Lab, 559075 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoming",
"surname": "Ding",
"fullName": "Guoming Ding",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongsu",
"surname": "Luo",
"fullName": "Zhongsu Luo",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, 12624 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rong",
"surname": "Yu",
"fullName": "Rong Yu",
"affiliation": "Artificial Intelligence Research Institute, Zhejiang Lab, 559075 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, 12377 Hangzhou, Zhejiang, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/waina/2012/4652/0/4652a344",
"title": "Characterizing Obfuscated JavaScript Using Abstract Syntax Trees: Experimenting with Malicious Scripts",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2012/4652a344/12OmNC4eSz4",
"parentPublication": {
"id": "proceedings/waina/2012/4652/0",
"title": "2012 26th International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780055",
"title": "Visualizing Volume Data using Physical Models",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780055/12OmNzBwGt1",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2013/4909/0/06544882",
"title": "Logical provenance in data-oriented workflows?",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2013/06544882/12OmNzcPAuw",
"parentPublication": {
"id": "proceedings/icde/2013/4909/0",
"title": "2013 29th IEEE International Conference on Data Engineering (ICDE 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258015",
"title": "Data context informed data wrangling",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258015/17D45WGGoLr",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2021/0337/0/033700a304",
"title": "Subtle Bugs Everywhere: Generating Documentation for Data Wrangling Code",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2021/033700a304/1AjThH8TZEQ",
"parentPublication": {
"id": "proceedings/ase/2021/0337/0",
"title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2022/0883/0/088300d178",
"title": "How, Where, and Why Data Provenance Improves Query Debugging: A Visual Demonstration of Fine–Grained Provenance Analysis for SQL",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2022/088300d178/1FwFn3RfhFm",
"parentPublication": {
"id": "proceedings/icde/2022/0883/0",
"title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904459",
"title": "Revealing the Semantics of Data Wrangling Scripts With Comantics",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904459/1H1gq4Y9HZm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vds/2022/5721/0/572100a037",
"title": "Interactive Visualization for Data Science Scripts",
"doi": null,
"abstractUrl": "/proceedings-article/vds/2022/572100a037/1JezKXQ9OQ8",
"parentPublication": {
"id": "proceedings/vds/2022/5721/0",
"title": "2022 IEEE Visualization in Data Science (VDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2022/5537/0/553700a472",
"title": "Visualizing Contributor Code Competency for PyPI Libraries: Preliminary Results",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2022/553700a472/1KOvgB6mKUE",
"parentPublication": {
"id": "proceedings/apsec/2022/5537/0",
"title": "2022 29th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2020/6901/0/09127207",
"title": "Casual Notebooks and Rigid Scripts: Understanding Data Science Programming",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2020/09127207/1lvQ16rOssE",
"parentPublication": {
"id": "proceedings/vl-hcc/2020/6901/0",
"title": "2020 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09689957",
"articleId": "1AlCfIlPhfy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09693178",
"articleId": "1As7aEVtgNW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AlCfIlPhfy",
"doi": "10.1109/TVCG.2022.3144479",
"abstract": "Rendering glinty details from specular microstructure enhances the level of realism in computer graphics. However, naive sampling fails to render such effects, due to insufficient sampling of the contributing normals on the surface patch visible through a pixel. Other approaches resort to searching for the relevant normals in more explicit ways, but they rely on special acceleration structures, leading to increased storage costs and complexity. In this paper, we propose to render specular glints through a different method: differentiable regularization. Our method includes two steps: first, we use differentiable path tracing to render a scene with a larger light size and/or rougher surfaces and record the gradients with respect to light size and roughness. Next, we use the result for the larger light size and rougher surfaces, together with their gradients, to predict the target value for the required light size and roughness by extrapolation. In the end, we get significantly reduced noise compared to rendering the scene directly. Our results are close to the reference, which uses many more samples per pixel. Although our method is biased, the overhead for differentiable rendering and prediction is negligible, so our improvement is essentially free. We demonstrate our differentiable regularization on several normal maps, all of which benefit from the method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rendering glinty details from specular microstructure enhances the level of realism in computer graphics. However, naive sampling fails to render such effects, due to insufficient sampling of the contributing normals on the surface patch visible through a pixel. Other approaches resort to searching for the relevant normals in more explicit ways, but they rely on special acceleration structures, leading to increased storage costs and complexity. In this paper, we propose to render specular glints through a different method: differentiable regularization. Our method includes two steps: first, we use differentiable path tracing to render a scene with a larger light size and/or rougher surfaces and record the gradients with respect to light size and roughness. Next, we use the result for the larger light size and rougher surfaces, together with their gradients, to predict the target value for the required light size and roughness by extrapolation. In the end, we get significantly reduced noise compared to rendering the scene directly. Our results are close to the reference, which uses many more samples per pixel. Although our method is biased, the overhead for differentiable rendering and prediction is negligible, so our improvement is essentially free. We demonstrate our differentiable regularization on several normal maps, all of which benefit from the method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rendering glinty details from specular microstructure enhances the level of realism in computer graphics. However, naive sampling fails to render such effects, due to insufficient sampling of the contributing normals on the surface patch visible through a pixel. Other approaches resort to searching for the relevant normals in more explicit ways, but they rely on special acceleration structures, leading to increased storage costs and complexity. In this paper, we propose to render specular glints through a different method: differentiable regularization. Our method includes two steps: first, we use differentiable path tracing to render a scene with a larger light size and/or rougher surfaces and record the gradients with respect to light size and roughness. Next, we use the result for the larger light size and rougher surfaces, together with their gradients, to predict the target value for the required light size and roughness by extrapolation. In the end, we get significantly reduced noise compared to rendering the scene directly. Our results are close to the reference, which uses many more samples per pixel. Although our method is biased, the overhead for differentiable rendering and prediction is negligible, so our improvement is essentially free. We demonstrate our differentiable regularization on several normal maps, all of which benefit from the method.",
"title": "Efficient Specular Glints Rendering with Differentiable Regularization",
"normalizedTitle": "Efficient Specular Glints Rendering with Differentiable Regularization",
"fno": "09689957",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Surface Roughness",
"Rough Surfaces",
"Microstructure",
"Light Sources",
"Real Time Systems",
"Gaussian Distribution",
"Microstructure",
"Glints Rendering",
"Differentiable Rendering"
],
"authors": [
{
"givenName": "Jiahui",
"surname": "Fan",
"fullName": "Jiahui Fan",
"affiliation": "Computer Science and Engineering, Nanjing University of Science and Technology, 12436 Nanjing, Jiangsu, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Beibei",
"surname": "Wang",
"fullName": "Beibei Wang",
"affiliation": "Computer Science and Engineering, Nanjing University of Science and Technology, 12436 Nanjing, Jiangsu, China, 210094 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenshi",
"surname": "Wu",
"fullName": "Wenshi Wu",
"affiliation": "Computer Science and Engineering, Nanjing University of Science and Technology, 12436 Nanjing, Jiangsu, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Milos",
"surname": "Hasan",
"fullName": "Milos Hasan",
"affiliation": "Adobe Research, ADOBE, San Jose, California, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Yang",
"fullName": "Jian Yang",
"affiliation": "Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, Jiangsu, China, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ling-Qi",
"surname": "Yan",
"fullName": "Ling-Qi Yan",
"affiliation": "Department of Computer Science, University of California Santa Barbara, 8786 Santa Barbara, California, United States, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iai/2004/8387/0/01300934",
"title": "Imaging and rendering of oil paintings using a multi-band camera",
"doi": null,
"abstractUrl": "/proceedings-article/iai/2004/01300934/12OmNB7tUq4",
"parentPublication": {
"id": "proceedings/iai/2004/8387/0",
"title": "2004 Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223149",
"title": "Extracting the shape and roughness of specular lobe objects using four light photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223149/12OmNwoxSc1",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220132",
"title": "Inspecting specular lobe objects using four light sources",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/07873287",
"title": "Torque Contribution to Haptic Rendering of Virtual Textures",
"doi": null,
"abstractUrl": "/journal/th/2017/04/07873287/13rRUILLkvB",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101591",
"title": "Real-Time Rendering of Rough Refraction",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101591/13rRUxBa5rT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07866010",
"title": "Rendering Rough Opaque Materials with Interfaced Lambertian Microfacets",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07866010/13rRUyYSWl7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g068",
"title": "Differentiable Surface Rendering via Non-Differentiable Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g068/1BmFpmQFMKA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10043749",
"title": "MILO: Multi-bounce Inverse Rendering for Indoor Scene with Light-emitting Objects",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10043749/1KJs5SH0na8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2020/5745/0/09094615",
"title": "Optical Quality Control for Adaptive Polishing Processes",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2020/09094615/1jVQDKlDMBO",
"parentPublication": {
"id": "proceedings/ssiai/2020/5745/0",
"title": "2020 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09684948",
"articleId": "1Ai9uf96gGk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09693232",
"articleId": "1As79CUmeZO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Aqs0jsBA9a",
"name": "ttg555501-09689957s1-supp2-3144479.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09689957s1-supp2-3144479.mp4",
"extension": "mp4",
"size": "187 MB",
"__typename": "WebExtraType"
},
{
"id": "1AqrZEMi0jS",
"name": "ttg555501-09689957s1-supp1-3144479.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09689957s1-supp1-3144479.mp4",
"extension": "mp4",
"size": "187 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Ai9uf96gGk",
"doi": "10.1109/TVCG.2022.3144143",
"abstract": "Mesh Schelling points explain how humans focus on specific regions of a 3D object. They have a large number of important applications in computer graphics and provide valuable information for perceptual psychology studies. However, detecting mesh Schelling points is time-consuming and expensive since the existing techniques are mostly based on participant observation studies. To overcome these limitations, we propose to employ powerful deep learning techniques to detect mesh Schelling points in an automatic manner, free from participant observation studies. Specifically, we utilize the mesh convolution and pooling operations to extract informative features from mesh objects, and then predict the 3D heat map of Schelling points in an end-to-end manner. In addition, we propose a Deep Schelling Network (DS-Net) to automatically detect the Schelling points, including a multi-scale fusion component and a novel region-specific loss function to improve our network for a better regression of heat maps. To the best of our knowledge, DS-Net is the first deep neural network for detecting Schelling points from 3D meshes. We evaluate DS-Net on a mesh Schelling point dataset obtained from participant observation studies. The experimental results demonstrate that DS-Net is capable of detecting mesh Schelling points effectively and outperforms various state-of-the-art mesh saliency methods and deep learning models, both qualitatively and quantitatively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mesh Schelling points explain how humans focus on specific regions of a 3D object. They have a large number of important applications in computer graphics and provide valuable information for perceptual psychology studies. However, detecting mesh Schelling points is time-consuming and expensive since the existing techniques are mostly based on participant observation studies. To overcome these limitations, we propose to employ powerful deep learning techniques to detect mesh Schelling points in an automatic manner, free from participant observation studies. Specifically, we utilize the mesh convolution and pooling operations to extract informative features from mesh objects, and then predict the 3D heat map of Schelling points in an end-to-end manner. In addition, we propose a Deep Schelling Network (DS-Net) to automatically detect the Schelling points, including a multi-scale fusion component and a novel region-specific loss function to improve our network for a better regression of heat maps. To the best of our knowledge, DS-Net is the first deep neural network for detecting Schelling points from 3D meshes. We evaluate DS-Net on a mesh Schelling point dataset obtained from participant observation studies. The experimental results demonstrate that DS-Net is capable of detecting mesh Schelling points effectively and outperforms various state-of-the-art mesh saliency methods and deep learning models, both qualitatively and quantitatively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mesh Schelling points explain how humans focus on specific regions of a 3D object. They have a large number of important applications in computer graphics and provide valuable information for perceptual psychology studies. However, detecting mesh Schelling points is time-consuming and expensive since the existing techniques are mostly based on participant observation studies. To overcome these limitations, we propose to employ powerful deep learning techniques to detect mesh Schelling points in an automatic manner, free from participant observation studies. Specifically, we utilize the mesh convolution and pooling operations to extract informative features from mesh objects, and then predict the 3D heat map of Schelling points in an end-to-end manner. In addition, we propose a Deep Schelling Network (DS-Net) to automatically detect the Schelling points, including a multi-scale fusion component and a novel region-specific loss function to improve our network for a better regression of heat maps. To the best of our knowledge, DS-Net is the first deep neural network for detecting Schelling points from 3D meshes. We evaluate DS-Net on a mesh Schelling point dataset obtained from participant observation studies. The experimental results demonstrate that DS-Net is capable of detecting mesh Schelling points effectively and outperforms various state-of-the-art mesh saliency methods and deep learning models, both qualitatively and quantitatively.",
"title": "Automatic Schelling Points Detection from Meshes",
"normalizedTitle": "Automatic Schelling Points Detection from Meshes",
"fno": "09684948",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Deep Learning",
"Heating Systems",
"Feature Extraction",
"Shape",
"Point Cloud Compression",
"Image Edge Detection",
"Deep Neural Network",
"Mesh Schelling Points",
"Geometric Deep Learning",
"Heat Map Regression"
],
"authors": [
{
"givenName": "Geng",
"surname": "Chen",
"fullName": "Geng Chen",
"affiliation": "IIAI, Inception Institute of Artificial Intelligence, Abu Dhabi, Abu Dhabi, United Arab Emirates, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hang",
"surname": "Dai",
"fullName": "Hang Dai",
"affiliation": "MBZUAI, MBZUAI, Abu Dhabi, Abu Dhabi, United Arab Emirates, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tao",
"surname": "Zhou",
"fullName": "Tao Zhou",
"affiliation": "IIAI, Inception Institute of Artificial Intelligence, Abu Dhabi, Abu Dhabi, United Arab Emirates, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianbing",
"surname": "Shen",
"fullName": "Jianbing Shen",
"affiliation": "Department of Information Technology and Electrical Engineering, ETH Zurich, Zurich, Zurich, Switzerland, CH-8092 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ling",
"surname": "Shao",
"fullName": "Ling Shao",
"affiliation": "IIAI, Inception Institute of Artificial Intelligence, Abu Dhabi, Abu Dhabi, United Arab Emirates, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cse/2011/4477/0/4477a155",
"title": "Detecting and Classifying Umblic Points from Polynomial Fitting Point Cloud",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2011/4477a155/12OmNqJq4ny",
"parentPublication": {
"id": "proceedings/cse/2011/4477/0",
"title": "2011 14th IEEE International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1999/0062/0/00620302",
"title": "Generating Smooth Surfaces with Bicubic Splines over Triangular Meshes: Toward Automatic Model Building from Unorganized 3D Points",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1999/00620302/12OmNxGja3m",
"parentPublication": {
"id": "proceedings/3dim/1999/0062/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270160",
"title": "Direct Reconstruction of Displaced Subdivision Surface from Unorganized Points",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270160/12OmNxvwp1G",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881425",
"title": "Automatic Detection of Characteristic Viscosity Points in Mineralogical Samples",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881425/12OmNzXnNDD",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmm/2004/2084/0/20840187",
"title": "Decomposing Polygon Meshes by Means of Critical Points",
"doi": null,
"abstractUrl": "/proceedings-article/mmm/2004/20840187/12OmNzd7bUO",
"parentPublication": {
"id": "proceedings/mmm/2004/2084/0",
"title": "Multi-Media Modeling Conference, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1999/05/t0536",
"title": "A New Network Topology with Multiple Meshes",
"doi": null,
"abstractUrl": "/journal/tc/1999/05/t0536/13rRUygT7m4",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a407",
"title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candarw/2022/7532/0/753200a218",
"title": "3D Mesh Generation from a Defective Point Cloud using Style Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/candarw/2022/753200a218/1LAz1N5rnva",
"parentPublication": {
"id": "proceedings/candarw/2022/7532/0",
"title": "2022 Tenth International Symposium on Computing and Networking Workshops (CANDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300l1959",
"title": "LBS Autoencoder: Self-Supervised Fitting of Articulated Meshes to Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300l1959/1gyrapLInSw",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a546",
"title": "Body Meshes as Points",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a546/1yeHUobJhZK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09684694",
"articleId": "1AgmoqEvwly",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09689957",
"articleId": "1AlCfIlPhfy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1AgmoqEvwly",
"doi": "10.1109/TVCG.2022.3143615",
"abstract": "Real-time graphics applications require high-quality textured materials to convey realism in virtual environments. Generating these textures is challenging as they need to be visually realistic, seamlessly tileable, and have a small impact on the memory consumption of the application. For this reason, they are often created manually by skilled artists. In this work, we present SeamlessGAN,a method capable of automatically generating tileable texture maps from a single input exemplar. In contrast to most existing methods, focused solely on solving the synthesis problem, our work tackles both problems, synthesis and tileability, simultaneously. Our key idea isto realize that tiling a latent space within a generative network trained using adversarial expansion techniques produces outputs with continuity at the seam intersection that can be then be turned into tileable images by cropping the central area. Since not every value of the latent space is valid to produce high-quality outputs, we leverage the discriminator as a perceptual error metric capable of identifying artifact-free textures during a sampling process. Further, in contrast to previous work on deep texture synthesis, our model is designed and optimized to work with multi-layered texture representations, enabling textures composed of multiple maps such as albedo, normals, etc. We extensively test our design choices for the network architecture, loss function, and sampling parameters. We show qualitatively and quantitatively that our approach outperforms previous methods and works for textures of different types.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real-time graphics applications require high-quality textured materials to convey realism in virtual environments. Generating these textures is challenging as they need to be visually realistic, seamlessly tileable, and have a small impact on the memory consumption of the application. For this reason, they are often created manually by skilled artists. In this work, we present SeamlessGAN,a method capable of automatically generating tileable texture maps from a single input exemplar. In contrast to most existing methods, focused solely on solving the synthesis problem, our work tackles both problems, synthesis and tileability, simultaneously. Our key idea isto realize that tiling a latent space within a generative network trained using adversarial expansion techniques produces outputs with continuity at the seam intersection that can be then be turned into tileable images by cropping the central area. Since not every value of the latent space is valid to produce high-quality outputs, we leverage the discriminator as a perceptual error metric capable of identifying artifact-free textures during a sampling process. Further, in contrast to previous work on deep texture synthesis, our model is designed and optimized to work with multi-layered texture representations, enabling textures composed of multiple maps such as albedo, normals, etc. We extensively test our design choices for the network architecture, loss function, and sampling parameters. We show qualitatively and quantitatively that our approach outperforms previous methods and works for textures of different types.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real-time graphics applications require high-quality textured materials to convey realism in virtual environments. Generating these textures is challenging as they need to be visually realistic, seamlessly tileable, and have a small impact on the memory consumption of the application. For this reason, they are often created manually by skilled artists. In this work, we present SeamlessGAN,a method capable of automatically generating tileable texture maps from a single input exemplar. In contrast to most existing methods, focused solely on solving the synthesis problem, our work tackles both problems, synthesis and tileability, simultaneously. Our key idea isto realize that tiling a latent space within a generative network trained using adversarial expansion techniques produces outputs with continuity at the seam intersection that can be then be turned into tileable images by cropping the central area. Since not every value of the latent space is valid to produce high-quality outputs, we leverage the discriminator as a perceptual error metric capable of identifying artifact-free textures during a sampling process. Further, in contrast to previous work on deep texture synthesis, our model is designed and optimized to work with multi-layered texture representations, enabling textures composed of multiple maps such as albedo, normals, etc. We extensively test our design choices for the network architecture, loss function, and sampling parameters. We show qualitatively and quantitatively that our approach outperforms previous methods and works for textures of different types.",
"title": "SeamlessGAN: Self-Supervised Synthesis of Tileable Texture Maps",
"normalizedTitle": "SeamlessGAN: Self-Supervised Synthesis of Tileable Texture Maps",
"fno": "09684694",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement",
"Crops",
"Training",
"Semantics",
"Generative Adversarial Networks",
"Estimation",
"Virtual Environments",
"Artificial Intelligence",
"Artificial Neural Network",
"Machine Vision",
"Image Texture",
"Graphics",
"Computational Photography"
],
"authors": [
{
"givenName": "Carlos",
"surname": "Rodriguez - Pardo",
"fullName": "Carlos Rodriguez - Pardo",
"affiliation": "Research, Seddi, Madrid, Madrid, Spain, 28007 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elena",
"surname": "Garces",
"fullName": "Elena Garces",
"affiliation": "Research, SEDDI, Madrid, Madrid, Spain, 28007 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2015/6964/0/07298817",
"title": "Texture representations for image and video synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298817/12OmNvoFjUu",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a018",
"title": "Skeletal Texture Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a018/12OmNwIHopo",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259710427",
"title": "Quantitative Evaluation of Near Regular Texture Synthesis Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259710427/12OmNwpoFKW",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/2",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2005/2389/0/23890383",
"title": "Patch-Based Texture Synthesis Using Wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2005/23890383/12OmNwtn3Df",
"parentPublication": {
"id": "proceedings/sibgrapi/2005/2389/0",
"title": "XVIII Brazilian Symposium on Computer Graphics and Image Processing (SIBGRAPI'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciap/2001/1183/0/11830244",
"title": "Texture Synthesis Using Image Pyramids and Self-Organizing Maps",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2001/11830244/12OmNxWLThP",
"parentPublication": {
"id": "proceedings/iciap/2001/1183/0",
"title": "Proceedings ICIAP 2001. 11th International Conference on Image Analysis and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imsccs/2006/2581/1/25810766",
"title": "Structural Pattern Analysis for Texture Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/imsccs/2006/25810766/12OmNxvO086",
"parentPublication": {
"id": "proceedings/imsccs/2006/2581/1",
"title": "Computer and Computational Sciences, International Multi-Symposiums on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2013/5051/0/5051a050",
"title": "Texture Synthesis Approach Using Cooperative Features",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2013/5051a050/12OmNyKJip8",
"parentPublication": {
"id": "proceedings/cgiv/2013/5051/0",
"title": "2013 10th International Conference Computer Graphics, Imaging and Visualization (CGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/1/169510239",
"title": "Multiresolution Block Sampling-Based Method for Texture Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169510239/12OmNz61cXZ",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/1",
"title": "Proceedings of 16th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/04/ttp2010040733",
"title": "Texture Synthesis with Grouplets",
"doi": null,
"abstractUrl": "/journal/tp/2010/04/ttp2010040733/13rRUx0xQ0D",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600b455",
"title": "AUV-Net: Learning Aligned UV Maps for Texture Transfer and Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600b455/1H1jiWtcBOw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09652041",
"articleId": "1zmuReh8VZ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09684948",
"articleId": "1Ai9uf96gGk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Ai9uWSb60w",
"name": "ttg555501-09684694s1-supp1-3143615.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09684694s1-supp1-3143615.pdf",
"extension": "pdf",
"size": "35.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1zmuReh8VZ6",
"doi": "10.1109/TVCG.2021.3135697",
"abstract": "Understanding user behavior patterns and visual analysis strategies is a long-standing challenge. Existing approaches rely largely on time-consuming manual processes such as interviews and the analysis of observational data. While it is technically possible to capture a history of user interactions and application states, it remains difficult to extract and describe analysis strategies based on interaction provenance. In this paper, we propose a novel visual approach to the meta-analysis of interaction provenance. We capture single and multiple user sessions as graphs of high-dimensional application states. Our meta-analysis is based on two different types of two-dimensional embeddings of these high-dimensional states: layouts based on (i) topology and (ii) attribute similarity. We applied these visualization approaches to synthetic and real user provenance data captured in two user studies. From our visualizations, we were able to extract patterns for data types and analytical reasoning strategies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Understanding user behavior patterns and visual analysis strategies is a long-standing challenge. Existing approaches rely largely on time-consuming manual processes such as interviews and the analysis of observational data. While it is technically possible to capture a history of user interactions and application states, it remains difficult to extract and describe analysis strategies based on interaction provenance. In this paper, we propose a novel visual approach to the meta-analysis of interaction provenance. We capture single and multiple user sessions as graphs of high-dimensional application states. Our meta-analysis is based on two different types of two-dimensional embeddings of these high-dimensional states: layouts based on (i) topology and (ii) attribute similarity. We applied these visualization approaches to synthetic and real user provenance data captured in two user studies. From our visualizations, we were able to extract patterns for data types and analytical reasoning strategies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Understanding user behavior patterns and visual analysis strategies is a long-standing challenge. Existing approaches rely largely on time-consuming manual processes such as interviews and the analysis of observational data. While it is technically possible to capture a history of user interactions and application states, it remains difficult to extract and describe analysis strategies based on interaction provenance. In this paper, we propose a novel visual approach to the meta-analysis of interaction provenance. We capture single and multiple user sessions as graphs of high-dimensional application states. Our meta-analysis is based on two different types of two-dimensional embeddings of these high-dimensional states: layouts based on (i) topology and (ii) attribute similarity. We applied these visualization approaches to synthetic and real user provenance data captured in two user studies. From our visualizations, we were able to extract patterns for data types and analytical reasoning strategies.",
"title": "Provectories: Embedding-based Analysis of Interaction Provenance Data",
"normalizedTitle": "Provectories: Embedding-based Analysis of Interaction Provenance Data",
"fno": "09652041",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Layout",
"Cognition",
"Visual Analytics",
"Time Series Analysis",
"Task Analysis",
"Collaboration",
"Visualization Techniques",
"Information Visualization",
"Visual Analytics",
"Interaction Provenance",
"Sensemaking"
],
"authors": [
{
"givenName": "Conny",
"surname": "Walchshofer",
"fullName": "Conny Walchshofer",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria, (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Hinterreiter",
"fullName": "Andreas Hinterreiter",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, 27266 Linz, Upper Austria, Austria, 4040 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai",
"surname": "Xu",
"fullName": "Kai Xu",
"affiliation": "Computer Science, Middlesex University, London, London, United Kingdom of Great Britain and Northern Ireland, NW4 4BT (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Holger",
"surname": "Stitz",
"fullName": "Holger Stitz",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, Linz, Upper Austria, Austria, 4040 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Streit",
"fullName": "Marc Streit",
"affiliation": "Institute of Computer Graphics, Johannes Kepler University Linz, Linz, Upper Austria, Austria, 4040 (e-mail: [email protected])",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2021-12-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/scc/2012/3049/0/06274153",
"title": "Analysis of Scientific Workflow Provenance Access Control Policies",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2012/06274153/12OmNBCqbCi",
"parentPublication": {
"id": "proceedings/scc/2012/3049/0",
"title": "2012 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2011/2163/0/06123284",
"title": "In Situ Data Provenance Capture in Spreadsheets",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2011/06123284/12OmNvqW6TS",
"parentPublication": {
"id": "proceedings/e-science/2011/2163/0",
"title": "2011 IEEE 7th International Conference on E-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883520",
"title": "Visual analysis and coding of data-rich user behavior",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883520/12OmNzXFoyS",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192714",
"title": "Characterizing Provenance in Visualization and Data Analysis: An Organizational Framework of Provenance Types and Purposes",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192714/13rRUxOdD2F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122139",
"title": "An Extensible Framework for Provenance in Human Terrain Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122139/13rRUyfbwqH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585615",
"title": "MC2 — Spatio-Temporal Provenance Data Aggregation for Visual Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585615/17D45VTRovn",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08788592",
"title": "Analytic Provenance in Practice: The Role of Provenance in Real-World Visualization and Data Analysis Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08788592/1cfqCMPtgRy",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08864010",
"title": "A Provenance Task Abstraction Framework",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08864010/1e0YpvcVR7y",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a191",
"title": "CrowdTrace: Visualizing Provenance in Distributed Sensemaking",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a191/1qROdAHfNWU",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a045",
"title": "A Case Study of Using Analytic Provenance to Reconstruct User Trust in a Guided Visual Analytics System",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a045/1yQB6KjO9oI",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552198",
"articleId": "1xibZkiDP8s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09684694",
"articleId": "1AgmoqEvwly",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zxGbfSCd7a",
"name": "ttg555501-09652041s1-tvcg-3135697-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09652041s1-tvcg-3135697-mm.zip",
"extension": "zip",
"size": "47.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xibZkiDP8s",
"doi": "10.1109/TVCG.2021.3114755",
"abstract": "In the process of understanding and redesigning the function of proteins in modern biochemistry, protein engineers are increasingly focusing on the exploration of regions in proteins called loops. Analyzing various characteristics of these regions helps the experts to design the transfer of the desired function from one protein to another. This process is denoted as loop grafting. As this process requires extensive manual treatment and currently there is no proper visual support for it, we designed LoopGrafter: a web-based tool that provides experts with visual support through all the loop grafting pipeline steps. The tool is logically divided into several phases, starting with the definition of two input proteins and ending with a set of grafted proteins. Each phase is supported by a specific set of abstracted 2D visual representations of loaded proteins and their loops that are interactively linked with the 3D view onto proteins. By sequentially passing through the individual phases, the user is shaping the list of loops that are potential candidates for loop grafting. In the end, the actual in-silico insertion of the loop candidates from one protein to the other is performed and the results are visually presented to the user. In this way, the fully computational rational design of proteins and their loops results in newly designed protein structures that can be further assembled and tested through in-vitro experiments. LoopGrafter was designed in tight collaboration with protein engineers, and its final appearance reflects many testing iterations. We showcase the contribution of LoopGrafter on a real case scenario and provide the readers with the experts' feedback, confirming the usefulness of our tool.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the process of understanding and redesigning the function of proteins in modern biochemistry, protein engineers are increasingly focusing on the exploration of regions in proteins called loops. Analyzing various characteristics of these regions helps the experts to design the transfer of the desired function from one protein to another. This process is denoted as loop grafting. As this process requires extensive manual treatment and currently there is no proper visual support for it, we designed LoopGrafter: a web-based tool that provides experts with visual support through all the loop grafting pipeline steps. The tool is logically divided into several phases, starting with the definition of two input proteins and ending with a set of grafted proteins. Each phase is supported by a specific set of abstracted 2D visual representations of loaded proteins and their loops that are interactively linked with the 3D view onto proteins. By sequentially passing through the individual phases, the user is shaping the list of loops that are potential candidates for loop grafting. In the end, the actual in-silico insertion of the loop candidates from one protein to the other is performed and the results are visually presented to the user. In this way, the fully computational rational design of proteins and their loops results in newly designed protein structures that can be further assembled and tested through in-vitro experiments. LoopGrafter was designed in tight collaboration with protein engineers, and its final appearance reflects many testing iterations. We showcase the contribution of LoopGrafter on a real case scenario and provide the readers with the experts' feedback, confirming the usefulness of our tool.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the process of understanding and redesigning the function of proteins in modern biochemistry, protein engineers are increasingly focusing on the exploration of regions in proteins called loops. Analyzing various characteristics of these regions helps the experts to design the transfer of the desired function from one protein to another. This process is denoted as loop grafting. As this process requires extensive manual treatment and currently there is no proper visual support for it, we designed LoopGrafter: a web-based tool that provides experts with visual support through all the loop grafting pipeline steps. The tool is logically divided into several phases, starting with the definition of two input proteins and ending with a set of grafted proteins. Each phase is supported by a specific set of abstracted 2D visual representations of loaded proteins and their loops that are interactively linked with the 3D view onto proteins. By sequentially passing through the individual phases, the user is shaping the list of loops that are potential candidates for loop grafting. In the end, the actual in-silico insertion of the loop candidates from one protein to the other is performed and the results are visually presented to the user. In this way, the fully computational rational design of proteins and their loops results in newly designed protein structures that can be further assembled and tested through in-vitro experiments. LoopGrafter was designed in tight collaboration with protein engineers, and its final appearance reflects many testing iterations. We showcase the contribution of LoopGrafter on a real case scenario and provide the readers with the experts' feedback, confirming the usefulness of our tool.",
"title": "LoopGrafter: Visual Support for the Grafting Workflow of Protein Loops",
"normalizedTitle": "LoopGrafter: Visual Support for the Grafting Workflow of Protein Loops",
"fno": "09552198",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Proteins",
"Tools",
"Visualization",
"Pipelines",
"Protein Engineering",
"Three Dimensional Displays",
"Data Visualization",
"Protein Visualization",
"Protein Engineering",
"Loop Grafting",
"Abstracted Views"
],
"authors": [
{
"givenName": "Filip",
"surname": "Opaleny",
"fullName": "Filip Opaleny",
"affiliation": "Department of Visual Computing, Faculty of Informatics, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pavol",
"surname": "Ulbrich",
"fullName": "Pavol Ulbrich",
"affiliation": "Department of Visual Computing, Faculty of Informatics, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joan",
"surname": "Planas-Iglesias",
"fullName": "Joan Planas-Iglesias",
"affiliation": "Loschmidt Laboratories, Department of Experimental Biology and RECETOX, Faculty of Science, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jan",
"surname": "Byska",
"fullName": "Jan Byska",
"affiliation": "Department of Visual Computing, Faculty of Informatics, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gaspar P.",
"surname": "Pinto",
"fullName": "Gaspar P. Pinto",
"affiliation": "Loschmidt Laboratories, Department of Experimental Biology and RECETOX, Faculty of Science, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Bednar",
"fullName": "David Bednar",
"affiliation": "Loschmidt Laboratories, Department of Experimental Biology and RECETOX, Faculty of Science, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katarina",
"surname": "FurmanovA",
"fullName": "Katarina FurmanovA",
"affiliation": "Department of Visual Computing, Faculty of Informatics, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Barbora",
"surname": "KozlikovA",
"fullName": "Barbora KozlikovA",
"affiliation": "Department of Visual Computing, Faculty of Informatics, Masaryk University, Brno, Czech Republic",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2021-09-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2017/3050/0/08217632",
"title": "HIV1-human protein-protein interaction prediction based on interface architecture similarity",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217632/12OmNBOllj0",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdew/2005/2657/0/22851156",
"title": "Prediction of Protein Functions Based on Protein-Protein Interaction Networks: A Min-Cut Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icdew/2005/22851156/12OmNClQ0Ai",
"parentPublication": {
"id": "proceedings/icdew/2005/2657/0",
"title": "21st International Conference on Data Engineering Workshops (ICDEW'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999204",
"title": "Essential protein identification based on essential protein-protein interaction prediction by integrated edge weights",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999204/12OmNrY3Lxn",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120452",
"title": "Multi-label Learning for Protein Subcellular Location Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120452/12OmNs0kyt4",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2014/5669/0/06999122",
"title": "Minimum dominating sets in cell cycle specific protein interaction networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2014/06999122/12OmNvTTc7P",
"parentPublication": {
"id": "proceedings/bibm/2014/5669/0",
"title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217927",
"title": "Topological properties of protein interaction network and phylogenetic age of proteins",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217927/12OmNxIzWOf",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2016/3834/0/3834a007",
"title": "Host-Pathogen Protein Interaction Prediction Based on Local Topology Structures of a Protein Interaction Network",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2016/3834a007/12OmNz5JBPC",
"parentPublication": {
"id": "proceedings/bibe/2016/3834/0",
"title": "2016 IEEE 16th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2020/01/08419278",
"title": "Disruption of Protein Complexes from Weighted Complex Networks",
"doi": null,
"abstractUrl": "/journal/tb/2020/01/08419278/13rRUNvyadr",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669606",
"title": "Improving human essential protein prediction using only protein sequences via ensemble learning",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669606/1A9W69hcizK",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995224",
"title": "Protein-Protein Interaction Network Analysis Reveals Distinct Patterns of Antibiotic Resistance Genes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995224/1JC2QzXaDCM",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "09652041",
"articleId": "1zmuReh8VZ6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |