data (dict) |
---|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IXUnbRdUEE",
"doi": "10.1109/TVCG.2022.3226463",
"abstract": "Graphical perception studies typically measure visualization encoding effectiveness using the error of an “average observer”, leading to canonical rankings of encodings for numerical attributes: <italic>e.g.</italic>, position <inline-formula><tex-math notation=\"LaTeX\">Z_$>$_Z</tex-math></inline-formula> area <inline-formula><tex-math notation=\"LaTeX\">Z_$>$_Z</tex-math></inline-formula> angle <inline-formula><tex-math notation=\"LaTeX\">Z_$>$_Z</tex-math></inline-formula> volume. Yet different people may vary in their ability to read different visualization types, leading to variance in this ranking across individuals not captured by population-level metrics using “average observer” models. One way we can bridge this gap is by recasting classic visual perception tasks as tools for assessing individual performance, in addition to overall visualization performance. In this paper we replicate and extend Cleveland and McGill's graphical comparison experiment using Bayesian multilevel regression, using these models to explore individual differences in visualization skill from multiple perspectives. The results from experiments and modeling indicate that some people show patterns of accuracy that credibly deviate from the canonical rankings of visualization effectiveness. We discuss implications of these findings, such as a need for new ways to communicate visualization effectiveness to designers, how patterns in individuals' responses may show systematic biases and strategies in visualization judgment, and how recasting classic visual perception tasks as tools for assessing individual performance may offer new ways to quantify aspects of visualization literacy. Experiment data, source code, and analysis scripts are available at the following repository: <uri>https://osf.io/8ub7t/?view_only=9be4798797404a4397be3c6fc2a68cc0</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graphical perception studies typically measure visualization encoding effectiveness using the error of an “average observer”, leading to canonical rankings of encodings for numerical attributes: <italic>e.g.</italic>, position <inline-formula><tex-math notation=\"LaTeX\">$>$</tex-math></inline-formula> area <inline-formula><tex-math notation=\"LaTeX\">$>$</tex-math></inline-formula> angle <inline-formula><tex-math notation=\"LaTeX\">$>$</tex-math></inline-formula> volume. Yet different people may vary in their ability to read different visualization types, leading to variance in this ranking across individuals not captured by population-level metrics using “average observer” models. One way we can bridge this gap is by recasting classic visual perception tasks as tools for assessing individual performance, in addition to overall visualization performance. In this paper we replicate and extend Cleveland and McGill's graphical comparison experiment using Bayesian multilevel regression, using these models to explore individual differences in visualization skill from multiple perspectives. The results from experiments and modeling indicate that some people show patterns of accuracy that credibly deviate from the canonical rankings of visualization effectiveness. We discuss implications of these findings, such as a need for new ways to communicate visualization effectiveness to designers, how patterns in individuals' responses may show systematic biases and strategies in visualization judgment, and how recasting classic visual perception tasks as tools for assessing individual performance may offer new ways to quantify aspects of visualization literacy. Experiment data, source code, and analysis scripts are available at the following repository: <uri>https://osf.io/8ub7t/?view_only=9be4798797404a4397be3c6fc2a68cc0</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graphical perception studies typically measure visualization encoding effectiveness using the error of an “average observer”, leading to canonical rankings of encodings for numerical attributes: e.g., position - area - angle - volume. Yet different people may vary in their ability to read different visualization types, leading to variance in this ranking across individuals not captured by population-level metrics using “average observer” models. One way we can bridge this gap is by recasting classic visual perception tasks as tools for assessing individual performance, in addition to overall visualization performance. In this paper we replicate and extend Cleveland and McGill's graphical comparison experiment using Bayesian multilevel regression, using these models to explore individual differences in visualization skill from multiple perspectives. The results from experiments and modeling indicate that some people show patterns of accuracy that credibly deviate from the canonical rankings of visualization effectiveness. We discuss implications of these findings, such as a need for new ways to communicate visualization effectiveness to designers, how patterns in individuals' responses may show systematic biases and strategies in visualization judgment, and how recasting classic visual perception tasks as tools for assessing individual performance may offer new ways to quantify aspects of visualization literacy. Experiment data, source code, and analysis scripts are available at the following repository: https://osf.io/8ub7t/?view_only=9be4798797404a4397be3c6fc2a68cc0.",
"title": "The Risks of Ranking: Revisiting Graphical Perception to Model Individual Differences in Visualization Performance",
"normalizedTitle": "The Risks of Ranking: Revisiting Graphical Perception to Model Individual Differences in Visualization Performance",
"fno": "09978718",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Task Analysis",
"Visualization",
"Correlation",
"Observers",
"Bars",
"Sociology",
"Visualization",
"Graphical Perception",
"Individual Differences"
],
"authors": [
{
"givenName": "Russell",
"surname": "Davis",
"fullName": "Russell Davis",
"affiliation": "Worcester Polytechnic Institute, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoying",
"surname": "Pu",
"fullName": "Xiaoying Pu",
"affiliation": "University of California, Merced, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiren",
"surname": "Ding",
"fullName": "Yiren Ding",
"affiliation": "Worcester Polytechnic Institute, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brian D.",
"surname": "Hall",
"fullName": "Brian D. Hall",
"affiliation": "University of Michigan, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Karen",
"surname": "Bonilla",
"fullName": "Karen Bonilla",
"affiliation": "Worcester Polytechnic Institute, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mi",
"surname": "Feng",
"fullName": "Mi Feng",
"affiliation": "Worcester Polytechnic Institute, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew",
"surname": "Kay",
"fullName": "Matthew Kay",
"affiliation": "Northwestern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lane",
"surname": "Harrison",
"fullName": "Lane Harrison",
"affiliation": "Worcester Polytechnic Institute, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tp/2019/05/08344546",
"title": "What Makes Objects Similar: A Unified Multi-Metric Learning Approach",
"doi": null,
"abstractUrl": "/journal/tp/2019/05/08344546/13rRUNvgyXK",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2023/02/09760177",
"title": "Imperceptible and Sparse Adversarial Attacks via a Dual-Population-Based Constrained Evolutionary Algorithm",
"doi": null,
"abstractUrl": "/journal/ai/2023/02/09760177/1CHsEUX81mU",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2022/06/09751236",
"title": "Memristors Enabled Computing Correlation Parameter In-Memory System: A Potential Alternative to Von Neumann Architecture",
"doi": null,
"abstractUrl": "/journal/si/2022/06/09751236/1CnxPa7I1gc",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09963545",
"title": "Parallel Core Maintenance of Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09963545/1Iz0MCWUJhe",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10021892",
"title": "Discrete Morse Sandwich: Fast Computation of Persistence Diagrams for Scalar Data – An Algorithm and A Benchmark",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10021892/1K3XDAtRZ8Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/10078319",
"title": "Top-<inline-formula><tex-math notation=\"LaTeX\">Z_$k$_Z</tex-math></inline-formula> Community Similarity Search Over Large-Scale Road Networks",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/10078319/1LIN5YpM6HK",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/su/5555/01/10071951",
"title": "Container Session Level Traffic Prediction from Network Interface Usage",
"doi": null,
"abstractUrl": "/journal/su/5555/01/10071951/1LxaR4vPy5G",
"parentPublication": {
"id": "trans/su",
"title": "IEEE Transactions on Sustainable Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09311251",
"title": "Exploring Individual Differences of Public Speaking Anxiety in Real-Life and Virtual Presentations",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09311251/1pYWAX0Po6A",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09451590",
"title": "Conceptual Metaphor and Graphical Convention Influence the Interpretation of Line Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09451590/1ujXLK9Vgac",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/04/09662190",
"title": "Higher-Order Truss Decomposition in Graphs",
"doi": null,
"abstractUrl": "/journal/tk/2023/04/09662190/1zzl2ZAAVvq",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09973820",
"articleId": "1IUAPHcYiD6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09978713",
"articleId": "1IXUnnVaWoE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IXUnNBj0Yw",
"doi": "10.1109/TVCG.2022.3227999",
"abstract": "When learning a motor skill it is helpful to get corrective feedback from an instructor. This will support the learner to execute the movement correctly. With modern technology, it is possible to provide this feedback via mixed reality. In most cases, this involves visual cues to help the user understand the corrective feedback. We analyzed recent research approaches utilizing visual cues for feedback in mixed reality. The scope of this paper is visual feedback for motor skill learning, which involves physical therapy, exercise, rehabilitation etc. While some of the surveyed literature discusses therapeutic effects of the training, this paper focuses on visualization techniques. We categorized the literature from a visualization standpoint, including visual cues, technology and characteristics of the feedback. This provided insights into how visual feedback in mixed reality is applied in the literature and how different aspects of the feedback are related. The insights obtained can help to better adjust future feedback systems to the target group and their needs. This paper also provides a deeper understanding of the characteristics of the visual cues in general and promotes future, more detailed research on this topic.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When learning a motor skill it is helpful to get corrective feedback from an instructor. This will support the learner to execute the movement correctly. With modern technology, it is possible to provide this feedback via mixed reality. In most cases, this involves visual cues to help the user understand the corrective feedback. We analyzed recent research approaches utilizing visual cues for feedback in mixed reality. The scope of this paper is visual feedback for motor skill learning, which involves physical therapy, exercise, rehabilitation etc. While some of the surveyed literature discusses therapeutic effects of the training, this paper focuses on visualization techniques. We categorized the literature from a visualization standpoint, including visual cues, technology and characteristics of the feedback. This provided insights into how visual feedback in mixed reality is applied in the literature and how different aspects of the feedback are related. The insights obtained can help to better adjust future feedback systems to the target group and their needs. This paper also provides a deeper understanding of the characteristics of the visual cues in general and promotes future, more detailed research on this topic.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When learning a motor skill it is helpful to get corrective feedback from an instructor. This will support the learner to execute the movement correctly. With modern technology, it is possible to provide this feedback via mixed reality. In most cases, this involves visual cues to help the user understand the corrective feedback. We analyzed recent research approaches utilizing visual cues for feedback in mixed reality. The scope of this paper is visual feedback for motor skill learning, which involves physical therapy, exercise, rehabilitation etc. While some of the surveyed literature discusses therapeutic effects of the training, this paper focuses on visualization techniques. We categorized the literature from a visualization standpoint, including visual cues, technology and characteristics of the feedback. This provided insights into how visual feedback in mixed reality is applied in the literature and how different aspects of the feedback are related. The insights obtained can help to better adjust future feedback systems to the target group and their needs. This paper also provides a deeper understanding of the characteristics of the visual cues in general and promotes future, more detailed research on this topic.",
"title": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey",
"normalizedTitle": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey",
"fno": "09978915",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Mixed Reality",
"Augmented Reality",
"Training",
"Task Analysis",
"Sports",
"Feedback Loop",
"Human Centered Computing",
"Visualization",
"Visualization Techniques And Methodologies",
"Interaction Techniques",
"Virtual And Augmented Reality"
],
"authors": [
{
"givenName": "Florian",
"surname": "Diller",
"fullName": "Florian Diller",
"affiliation": "UX-Vis group at Hochschule Worms University of Applied Sciences, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerik",
"surname": "Scheuermann",
"fullName": "Gerik Scheuermann",
"affiliation": "BSV group at Universität Leipzig, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Wiebel",
"fullName": "Alexander Wiebel",
"affiliation": "UX-Vis group at Hochschule Worms University of Applied Sciences, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-amh/2009/5508/0/05336726",
"title": "Loosely-coupled mixed reality: Using the environment metaphorically",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2009/05336726/12OmNCbU2Wk",
"parentPublication": {
"id": "proceedings/ismar-amh/2009/5508/0",
"title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892292",
"title": "Corrective feedback for depth perception in CAVE-like systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892292/12OmNrNh0Ml",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icce/2002/1509/0/15091459",
"title": "A Research on the Types of the Web Based Corrective Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/icce/2002/15091459/12OmNvT2oZw",
"parentPublication": {
"id": "proceedings/icce/2002/1509/0",
"title": "Computers in Education, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a332",
"title": "Feedback in the Motor Skill Domain",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a332/12OmNzIUfYD",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/02/ttg2012020332",
"title": "DVV: A Taxonomy for Mixed Reality Visualization in Image Guided Surgery",
"doi": null,
"abstractUrl": "/journal/tg/2012/02/ttg2012020332/13rRUILtJzx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699334",
"title": "The Deployment of a Mixed Reality Experience for a Small-Scale Exhibition in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699334/19F1OQnjwSk",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a891",
"title": "MR-FoodCoach: Enabling a convenience store on mixed reality space for healthier purchases",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a891/1J7WnK9PRxS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a149",
"title": "Active Visualization of Visual Cues on Hand for Better User Interface Design Generalization in Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a149/1KmFaKoZWhy",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a387",
"title": "Watch-Your-Skiing: Visualizations for VR Skiing using Real-time Body Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a387/1yeQDGki96U",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09978684",
"articleId": "1IXUnEM2oc8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09982378",
"articleId": "1J2T8H9Y2Ws",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IUAPHcYiD6",
"doi": "10.1109/TVCG.2022.3226689",
"abstract": "We introduce an end-to-end learning framework for <italic>image-to-image composition</italic>, aiming to plausibly compose an object represented as a cropped patch from an object image into a background scene image. As our approach emphasizes more on semantic and structural coherence of the composed images, rather than their pixel-level RGB accuracies, we tailor the input and output of our network with <italic>structure-aware</italic> features and design our network losses accordingly, with ground truth established in a <italic>self-supervised</italic> setting through the object cropping. Specifically, our network takes the semantic layout features from the input scene image, features encoded from the edges and silhouette in the input object patch, as well as a latent code as inputs, and generates a 2D spatial affine transform defining the translation and scaling of the object patch. The learned parameters are further fed into a differentiable spatial transformer network to transform the object patch into the target image, where our model is trained adversarially using an affine transform discriminator and a layout discriminator. We evaluate our network, coined SAC-GAN, for various image composition scenarios in terms of quality, composability, and generalizability of the composite images. Comparisons are made to state-of-the-art alternatives, including Instance Insertion, ST-GAN, CompGAN and PlaceNet, confirming superiority of our method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce an end-to-end learning framework for <italic>image-to-image composition</italic>, aiming to plausibly compose an object represented as a cropped patch from an object image into a background scene image. As our approach emphasizes more on semantic and structural coherence of the composed images, rather than their pixel-level RGB accuracies, we tailor the input and output of our network with <italic>structure-aware</italic> features and design our network losses accordingly, with ground truth established in a <italic>self-supervised</italic> setting through the object cropping. Specifically, our network takes the semantic layout features from the input scene image, features encoded from the edges and silhouette in the input object patch, as well as a latent code as inputs, and generates a 2D spatial affine transform defining the translation and scaling of the object patch. The learned parameters are further fed into a differentiable spatial transformer network to transform the object patch into the target image, where our model is trained adversarially using an affine transform discriminator and a layout discriminator. We evaluate our network, coined SAC-GAN, for various image composition scenarios in terms of quality, composability, and generalizability of the composite images. Comparisons are made to state-of-the-art alternatives, including Instance Insertion, ST-GAN, CompGAN and PlaceNet, confirming superiority of our method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce an end-to-end learning framework for image-to-image composition, aiming to plausibly compose an object represented as a cropped patch from an object image into a background scene image. As our approach emphasizes more on semantic and structural coherence of the composed images, rather than their pixel-level RGB accuracies, we tailor the input and output of our network with structure-aware features and design our network losses accordingly, with ground truth established in a self-supervised setting through the object cropping. Specifically, our network takes the semantic layout features from the input scene image, features encoded from the edges and silhouette in the input object patch, as well as a latent code as inputs, and generates a 2D spatial affine transform defining the translation and scaling of the object patch. The learned parameters are further fed into a differentiable spatial transformer network to transform the object patch into the target image, where our model is trained adversarially using an affine transform discriminator and a layout discriminator. We evaluate our network, coined SAC-GAN, for various image composition scenarios in terms of quality, composability, and generalizability of the composite images. Comparisons are made to state-of-the-art alternatives, including Instance Insertion, ST-GAN, CompGAN and PlaceNet, confirming superiority of our method.",
"title": "SAC-GAN: Structure-Aware Image Composition",
"normalizedTitle": "SAC-GAN: Structure-Aware Image Composition",
"fno": "09973820",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Transforms",
"Semantics",
"Three Dimensional Displays",
"Image Edge Detection",
"Codes",
"Coherence",
"GA Ns",
"Self Supervision",
"Structure Aware Image Composition"
],
"authors": [
{
"givenName": "Hang",
"surname": "Zhou",
"fullName": "Hang Zhou",
"affiliation": "School of Computing Science, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Ma",
"fullName": "Rui Ma",
"affiliation": "School of Artificial Intelligence, Jilin University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ling-Xiao",
"surname": "Zhang",
"fullName": "Ling-Xiao Zhang",
"affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ali",
"surname": "Mahdavi-Amiri",
"fullName": "Ali Mahdavi-Amiri",
"affiliation": "School of Computing Science, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao",
"surname": "Zhang",
"fullName": "Hao Zhang",
"affiliation": "School of Computing Science, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000b848",
"title": "Disentangling Structure and Aesthetics for Style-Aware Image Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b848/17D45VTRovJ",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h754",
"title": "Modeling Image Composition for Complex Scene Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h754/1H0L0Gta396",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600l1294",
"title": "StyleSwin: Transformer-based GAN for High-resolution Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1294/1H1lc56wESc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8218",
"title": "A Style-aware Discriminator for Controllable Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8218/1H1m9v63vsk",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102794",
"title": "Eigan: Enhanced Inpainting Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102794/1kwrlxsf48o",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e675",
"title": "DOA-GAN: Dual-Order Attentive Generative Adversarial Network for Image Copy-Move Forgery Detection and Localization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e675/1m3oox77sD6",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412539",
"title": "A Self-supervised GAN for Unsupervised Few-shot Object Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412539/1tmi26uWlws",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412305",
"title": "Position-aware and Symmetry Enhanced GAN for Radial Distortion Correction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412305/1tmiXDlffNK",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09427066",
"title": "Learning Layout and Style Reconfigurable GANs for Controllable Image Synthesis",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09427066/1tuvzMfndhS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a203",
"title": "DW-GAN: A Discrete Wavelet Transform GAN for NonHomogeneous Dehazing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a203/1yJYrxYT1hS",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09969571",
"articleId": "1IMidH7hZhC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09978718",
"articleId": "1IXUnbRdUEE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IMicNIXex2",
"doi": "10.1109/TVCG.2022.3226218",
"abstract": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"normalizedTitle": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"fno": "09969167",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Color Analysis",
"Data Visualization",
"Visualization",
"Task Analysis",
"Encoding",
"Visual Perception",
"Media",
"Color Assignment",
"Color Palette",
"Information Visualization",
"Visual Perception",
"Visualization Design"
],
"authors": [
{
"givenName": "Shuqi",
"surname": "Liu",
"fullName": "Shuqi Liu",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingtian",
"surname": "Tao",
"fullName": "Mingtian Tao",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yifei",
"surname": "Huang",
"fullName": "Yifei Huang",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changbo",
"surname": "Wang",
"fullName": "Changbo Wang",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chenhui",
"surname": "Li",
"fullName": "Chenhui Li",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1997/8183/1/81831830",
"title": "Adaptive palette determination for color images based on Kohonen networks",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831830/12OmNAnMuHl",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2016/2179/0/2179a266",
"title": "Texture Compression with Hierarchical Palette",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2016/2179a266/12OmNzYeAKH",
"parentPublication": {
"id": "proceedings/bigmm/2016/2179/0",
"title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101746",
"title": "Perceptually Driven Visibility Optimization for Categorical Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101746/13rRUwI5Ug7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/07911336",
"title": "Color Orchestra: Ordering Color Palettes for Interpolation and Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/07911336/13rRUxASu0R",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061739",
"title": "Color Design for Illustrative Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061739/13rRUxE04tv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539386",
"title": "Colorgorical: Creating discriminable and preferable color palettes for information visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539386/13rRUxlgy3M",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192709",
"title": "A Linguistic Approach to Categorical Color Assignment for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192709/13rRUyYjKaj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a040",
"title": "Affective Color Palette Recommendations with Non-negative Tensor Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a040/1KaH7ehIrGU",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d610",
"title": "Color Recommendation for Vector Graphic Documents based on Multi-Palette Representation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d610/1KxUnpzWb3q",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805429",
"title": "Color Crafting: Automating the Construction of Designer Quality Color Ramps",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805429/1cG4w5XPNUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09968104",
"articleId": "1IKDek8SF0c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09969571",
"articleId": "1IMidH7hZhC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IRiLbGuc7e",
"name": "ttg555501-09969167s1-supp1-3226218.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09969167s1-supp1-3226218.pdf",
"extension": "pdf",
"size": "5.23 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IMidH7hZhC",
"doi": "10.1109/TVCG.2022.3225844",
"abstract": "Virtual content creation and interaction play an important role in modern 3D applications. Recovering detailed 3D models from real scenes can significantly expand the scope of its applications and has been studied for decades in the computer vision and computer graphics community. In this work, we propose Vox-Surf, a voxel-based implicit surface representation. Our Vox-Surf divides the space into finite sparse voxels, where each voxel is a basic geometry unit that stores geometry and appearance information on its corner vertices. Due to the sparsity inherited from the voxel representation, Vox-Surf is suitable for almost any scene and can be easily trained end-to-end from multiple view images. We utilize a progressive training process to gradually cull out empty voxels and keep only valid voxels for further optimization, which greatly reduces the number of sample points and improves inference speed. Experiments show that our Vox-Surf representation can learn fine surface details and accurate colors with less memory and faster rendering than previous methods. The resulting fine voxels can also be considered as the bounding volumes for collision detection, which is useful in 3D interactions. We also show the potential application of Vox-Surf in scene editing and augmented reality. The source code is publicly available at <uri>https://github.com/zju3dv/Vox-Surf</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual content creation and interaction play an important role in modern 3D applications. Recovering detailed 3D models from real scenes can significantly expand the scope of its applications and has been studied for decades in the computer vision and computer graphics community. In this work, we propose Vox-Surf, a voxel-based implicit surface representation. Our Vox-Surf divides the space into finite sparse voxels, where each voxel is a basic geometry unit that stores geometry and appearance information on its corner vertices. Due to the sparsity inherited from the voxel representation, Vox-Surf is suitable for almost any scene and can be easily trained end-to-end from multiple view images. We utilize a progressive training process to gradually cull out empty voxels and keep only valid voxels for further optimization, which greatly reduces the number of sample points and improves inference speed. Experiments show that our Vox-Surf representation can learn fine surface details and accurate colors with less memory and faster rendering than previous methods. The resulting fine voxels can also be considered as the bounding volumes for collision detection, which is useful in 3D interactions. We also show the potential application of Vox-Surf in scene editing and augmented reality. The source code is publicly available at <uri>https://github.com/zju3dv/Vox-Surf</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual content creation and interaction play an important role in modern 3D applications. Recovering detailed 3D models from real scenes can significantly expand the scope of its applications and has been studied for decades in the computer vision and computer graphics community. In this work, we propose Vox-Surf, a voxel-based implicit surface representation. Our Vox-Surf divides the space into finite sparse voxels, where each voxel is a basic geometry unit that stores geometry and appearance information on its corner vertices. Due to the sparsity inherited from the voxel representation, Vox-Surf is suitable for almost any scene and can be easily trained end-to-end from multiple view images. We utilize a progressive training process to gradually cull out empty voxels and keep only valid voxels for further optimization, which greatly reduces the number of sample points and improves inference speed. Experiments show that our Vox-Surf representation can learn fine surface details and accurate colors with less memory and faster rendering than previous methods. The resulting fine voxels can also be considered as the bounding volumes for collision detection, which is useful in 3D interactions. We also show the potential application of Vox-Surf in scene editing and augmented reality. The source code is publicly available at https://github.com/zju3dv/Vox-Surf.",
"title": "Vox-Surf: Voxel-Based Implicit Surface Representation",
"normalizedTitle": "Vox-Surf: Voxel-Based Implicit Surface Representation",
"fno": "09969571",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surface Reconstruction",
"Image Reconstruction",
"Geometry",
"Rendering Computer Graphics",
"Three Dimensional Displays",
"Feature Extraction",
"Surface Treatment",
"Surface Reconstruction",
"Implicit Representation",
"Scene Editing"
],
"authors": [
{
"givenName": "Hai",
"surname": "Li",
"fullName": "Hai Li",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingrui",
"surname": "Yang",
"fullName": "Xingrui Yang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongjia",
"surname": "Zhai",
"fullName": "Hongjia Zhai",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuqian",
"surname": "Liu",
"fullName": "Yuqian Liu",
"affiliation": "Visual Information Laboratory, University of Bristol, Bristol, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guofeng",
"surname": "Zhang",
"fullName": "Guofeng Zhang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/candar/2014/4152/0/4152a367",
"title": "A Memory Efficient Parallel Method for Voxel-Based Multiview Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2014/4152a367/12OmNAJDBw6",
"parentPublication": {
"id": "proceedings/candar/2014/4152/0",
"title": "2014 Second International Symposium on Computing and Networking (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a412",
"title": "Hierarchical Surface Prediction for 3D Object Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a412/12OmNy4r3YL",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08630046",
"title": "Hierarchical Surface Prediction",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08630046/17D45Xcttk2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d144",
"title": "Voxel Transformer for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d144/1BmHhfdx7dm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09844250",
"title": "Voxel-Mesh Network for Geodesic-Aware 3D Semantic Segmentation of Indoor Scenes",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09844250/1FnqTVYetzy",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g270",
"title": "Gradient-SDF: A Semi-Implicit Surface Representation for 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g270/1H0MXW1GTN6",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a433",
"title": "GO-Surf: Neural Feature Grid Optimization for Fast, High-Fidelity RGB-D Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a433/1KYso11QHx6",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300e742",
"title": "Implicit Surface Representations As Layers in Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300e742/1hVlBZYxMe4",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b290",
"title": "Deep Implicit Volume Compression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b290/1m3oiGZMFwc",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09339892",
"title": "Digital Surface Regularization With Guarantees",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09339892/1qLhYrSA4ve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09969167",
"articleId": "1IMicNIXex2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09973820",
"articleId": "1IUAPHcYiD6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IKDeaftuUM",
"doi": "10.1109/TVCG.2022.3225526",
"abstract": "Volumetric data abounds in medical imaging and other fields. With the improved imaging quality and the increased resolution, volumetric datasets are getting so large that the existing tools have become inadequate for processing and analyzing the data. Here we consider the problem of computing tetrahedral meshes to represent large volumetric datasets with labeled multiple materials, which are often encountered in medical imaging or microscopy optical slice tomography. Such tetrahedral meshes are a more compact and expressive geometric representation so are in demand for efficient visualization and simulation of the data, which are impossible if the original large volumetric data are used directly due to the large memory requirement. Existing methods for meshing volumetric data are not scalable for handling large datasets due to their sheer demand on excessively large run-time memory or failure to produce a tet-mesh that preserves the multi-material structure of the original volumetric data. In this paper we propose a novel approach, called <italic>Marching Windows</italic>, that uses a moving window and a disk-swap strategy to reduce the run-time memory footprint, devise a new scheme that guarantees to preserve the topological structure of the original dataset, and adopt an error-guided optimization technique to improve both geometric approximation error and mesh quality. Extensive experiments show that our method is capable of processing very large volumetric datasets beyond the capability of the existing methods and producing tetrahedral meshes of high quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volumetric data abounds in medical imaging and other fields. With the improved imaging quality and the increased resolution, volumetric datasets are getting so large that the existing tools have become inadequate for processing and analyzing the data. Here we consider the problem of computing tetrahedral meshes to represent large volumetric datasets with labeled multiple materials, which are often encountered in medical imaging or microscopy optical slice tomography. Such tetrahedral meshes are a more compact and expressive geometric representation so are in demand for efficient visualization and simulation of the data, which are impossible if the original large volumetric data are used directly due to the large memory requirement. Existing methods for meshing volumetric data are not scalable for handling large datasets due to their sheer demand on excessively large run-time memory or failure to produce a tet-mesh that preserves the multi-material structure of the original volumetric data. In this paper we propose a novel approach, called <italic>Marching Windows</italic>, that uses a moving window and a disk-swap strategy to reduce the run-time memory footprint, devise a new scheme that guarantees to preserve the topological structure of the original dataset, and adopt an error-guided optimization technique to improve both geometric approximation error and mesh quality. Extensive experiments show that our method is capable of processing very large volumetric datasets beyond the capability of the existing methods and producing tetrahedral meshes of high quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volumetric data abounds in medical imaging and other fields. With the improved imaging quality and the increased resolution, volumetric datasets are getting so large that the existing tools have become inadequate for processing and analyzing the data. Here we consider the problem of computing tetrahedral meshes to represent large volumetric datasets with labeled multiple materials, which are often encountered in medical imaging or microscopy optical slice tomography. Such tetrahedral meshes are a more compact and expressive geometric representation so are in demand for efficient visualization and simulation of the data, which are impossible if the original large volumetric data are used directly due to the large memory requirement. Existing methods for meshing volumetric data are not scalable for handling large datasets due to their sheer demand on excessively large run-time memory or failure to produce a tet-mesh that preserves the multi-material structure of the original volumetric data. In this paper we propose a novel approach, called Marching Windows, that uses a moving window and a disk-swap strategy to reduce the run-time memory footprint, devise a new scheme that guarantees to preserve the topological structure of the original dataset, and adopt an error-guided optimization technique to improve both geometric approximation error and mesh quality. Extensive experiments show that our method is capable of processing very large volumetric datasets beyond the capability of the existing methods and producing tetrahedral meshes of high quality.",
"title": "Marching Windows: Scalable Mesh Generation for Volumetric Data with Multiple Materials",
"normalizedTitle": "Marching Windows: Scalable Mesh Generation for Volumetric Data with Multiple Materials",
"fno": "09968044",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Topology",
"Mesh Generation",
"Three Dimensional Displays",
"Imaging",
"Data Visualization",
"Memory Management",
"Measurement",
"Large Volumetric Data",
"Multiple Material",
"Marching Windows",
"Mesh Simplification",
"Topology Guarantee"
],
"authors": [
{
"givenName": "Wenhua",
"surname": "Zhang",
"fullName": "Wenhua Zhang",
"affiliation": "Department of Computer Science, The University of Hong Kong, Hong Kong SAR, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yating",
"surname": "Yue",
"fullName": "Yating Yue",
"affiliation": "Department of Computer Science, The University of Hong Kong, Hong Kong SAR, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao",
"surname": "Pan",
"fullName": "Hao Pan",
"affiliation": "Microsoft Research Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhonggui",
"surname": "Chen",
"fullName": "Zhonggui Chen",
"affiliation": "School of Informatics, Xiamen University, Xiamen, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chuan",
"surname": "Wang",
"fullName": "Chuan Wang",
"affiliation": "Department of Computer Science, The University of Hong Kong, Hong Kong SAR, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanspeter",
"surname": "Pfister",
"fullName": "Hanspeter Pfister",
"affiliation": "John A. Paulson School of Engineering and Applied Sciences, Harvard University, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenping",
"surname": "Wang",
"fullName": "Wenping Wang",
"affiliation": "Department of Visualization, Texas A&M University, TX, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isise/2008/3494/2/3494b414",
"title": "A Tetrahedral Mesh Generation Algorithm from Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2008/3494b414/12OmNBUS7cC",
"parentPublication": {
"id": "proceedings/isise/2008/3494/2",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icycs/2008/3398/0/3398a925",
"title": "Generation of Three-Dimensional Finite Element Meshes from CT Dataset of Human Femurs",
"doi": null,
"abstractUrl": "/proceedings-article/icycs/2008/3398a925/12OmNCdBDEg",
"parentPublication": {
"id": "proceedings/icycs/2008/3398/0",
"title": "2008 9th International Conference for Young Computer Scientists",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a085",
"title": "A Differential Geometry Approach for Change Detection in Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a085/12OmNrAMEM6",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620387",
"title": "The multilevel finite element method for adaptive mesh optimization and visualization of volume data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620387/12OmNrAMEQ3",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532822",
"title": "Quality mesh generation for molecular skin surfaces using restricted union of balls",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532822/12OmNvSbBIy",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2016/5081/0/07943387",
"title": "Tetrahedral Mesh Segmentation Based on Quality Criteria",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943387/12OmNwM6A1x",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chopra",
"title": "TetFusion: An Algorithm For Rapid Tetrahedral Mesh Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chopra/12OmNyQphf1",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/09/ttg2013091539",
"title": "Surface Mesh to Volumetric Spline Conversion with Generalized Polycubes",
"doi": null,
"abstractUrl": "/journal/tg/2013/09/ttg2013091539/13rRUEgarsH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/02/ttg2014020223",
"title": "Lattice Cleaving: A Multimaterial Tetrahedral Meshing Algorithm with Guarantees",
"doi": null,
"abstractUrl": "/journal/tg/2014/02/ttg2014020223/13rRUEgs2M2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005847",
"title": "A Volumetric Shape Registration Based on Locally Affine-Invariant Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005847/17D45XoXP6p",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09966829",
"articleId": "1IIYlkz8kkE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09968104",
"articleId": "1IKDek8SF0c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
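The Marching Windows record above describes a moving-window, disk-swap strategy for keeping run-time memory bounded while meshing a very large labeled volume. Below is a minimal Python sketch of that windowing idea only, under stated assumptions: the volume is a raw uint8 label field on disk, the window size is arbitrary, and `mesh_window()` is a hypothetical per-window mesher, not the authors' implementation.

```python
# Iterate a large on-disk labeled volume one window at a time; only the
# active window is copied into RAM (the full volume stays memory-mapped).
import numpy as np

def iter_windows(path, shape, window=(64, 64, 64), dtype=np.uint8):
    """Yield (origin, block) pairs covering the whole volume."""
    vol = np.memmap(path, mode="r", dtype=dtype, shape=shape)
    for z in range(0, shape[0], window[0]):
        for y in range(0, shape[1], window[1]):
            for x in range(0, shape[2], window[2]):
                # Copy just this window; edge windows are clipped by slicing.
                block = np.asarray(
                    vol[z:z + window[0], y:y + window[1], x:x + window[2]]
                )
                yield (z, y, x), block

# Usage sketch: mesh each window independently, then stitch partial meshes.
# for origin, block in iter_windows("labels.raw", shape=(2048, 2048, 2048)):
#     partial = mesh_window(block, origin)  # hypothetical per-window mesher
```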
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IKDek8SF0c",
"doi": "10.1109/TVCG.2022.3224674",
"abstract": "We introduce a framework for compactly representing light field content with the novel concept of neural subspaces. While the recently proposed neural light field representation achieves great compression results by encoding a light field into a single neural network, the unified design is not optimized for the composite structures exhibited in light fields. Moreover, encoding every part of the light field into one network is not ideal for applications that require rapid transmission and decoding. We recognize this problem's connection to subspace learning. We present a method that uses several small neural networks, specializing in learning the neural subspace for a particular light field segment. Moreover, we propose an adaptive weight sharing strategy among those small networks, improving parameter efficiency. In effect, this strategy enables a concerted way to track the similarity among nearby neural subspaces by leveraging the layered structure of neural networks. Furthermore, we develop a soft-classification technique to enhance the color prediction accuracy of neural representations. Our experimental results show that our method better reconstructs the light field than previous methods on various light field scenes. We further demonstrate its successful deployment on encoding light fields with irregular viewpoint layout and dynamic scene content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a framework for compactly representing light field content with the novel concept of neural subspaces. While the recently proposed neural light field representation achieves great compression results by encoding a light field into a single neural network, the unified design is not optimized for the composite structures exhibited in light fields. Moreover, encoding every part of the light field into one network is not ideal for applications that require rapid transmission and decoding. We recognize this problem's connection to subspace learning. We present a method that uses several small neural networks, specializing in learning the neural subspace for a particular light field segment. Moreover, we propose an adaptive weight sharing strategy among those small networks, improving parameter efficiency. In effect, this strategy enables a concerted way to track the similarity among nearby neural subspaces by leveraging the layered structure of neural networks. Furthermore, we develop a soft-classification technique to enhance the color prediction accuracy of neural representations. Our experimental results show that our method better reconstructs the light field than previous methods on various light field scenes. We further demonstrate its successful deployment on encoding light fields with irregular viewpoint layout and dynamic scene content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a framework for compactly representing light field content with the novel concept of neural subspaces. While the recently proposed neural light field representation achieves great compression results by encoding a light field into a single neural network, the unified design is not optimized for the composite structures exhibited in light fields. Moreover, encoding every part of the light field into one network is not ideal for applications that require rapid transmission and decoding. We recognize this problem's connection to subspace learning. We present a method that uses several small neural networks, specializing in learning the neural subspace for a particular light field segment. Moreover, we propose an adaptive weight sharing strategy among those small networks, improving parameter efficiency. In effect, this strategy enables a concerted way to track the similarity among nearby neural subspaces by leveraging the layered structure of neural networks. Furthermore, we develop a soft-classification technique to enhance the color prediction accuracy of neural representations. Our experimental results show that our method better reconstructs the light field than previous methods on various light field scenes. We further demonstrate its successful deployment on encoding light fields with irregular viewpoint layout and dynamic scene content.",
"title": "Neural Subspaces for Light Fields",
"normalizedTitle": "Neural Subspaces for Light Fields",
"fno": "09968104",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Light Fields",
"Neural Networks",
"Videos",
"Image Color Analysis",
"Image Coding",
"Encoding",
"Dictionaries",
"Light Field Compression",
"Volumetric Videos",
"Implicit Neural Representations",
"Neural Fields"
],
"authors": [
{
"givenName": "Brandon Yushan",
"surname": "Feng",
"fullName": "Brandon Yushan Feng",
"affiliation": "Department of Computer Science, University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Amitabh",
"surname": "Varshney",
"fullName": "Amitabh Varshney",
"affiliation": "Department of Computer Science, University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dicta/2011/4588/0/4588a536",
"title": "Face Recognition across Pose on Video Using Eigen Light-Fields",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a536/12OmNvUaNpk",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/04/i0449",
"title": "Appearance-Based Face Recognition and Light-Fields",
"doi": null,
"abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08620368",
"title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08620368/17D45Wt3Exc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4204",
"title": "SIGNET: Efficient Neural Representation for Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4204/1BmELbO5QZi",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859373",
"title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9787",
"title": "Learning Neural Light Fields with Ray-Space Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9787/1H0OiVLs2TS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2943",
"title": "Towards Multimodal Depth Estimation from Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2943/1H1k4uRP4sM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8398",
"title": "Neural Point Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a231",
"title": "Progressive Multi-Scale Light Field Networks",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a231/1KYswQA4BZ6",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a672",
"title": "A Linear Approach to Absolute Pose Estimation for Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a672/1qyxisAtUOI",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09968044",
"articleId": "1IKDeaftuUM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09969167",
"articleId": "1IMicNIXex2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IMic5lQXRK",
"name": "ttg555501-09968104s1-supp1-3224674.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09968104s1-supp1-3224674.mp4",
"extension": "mp4",
"size": "29.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
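The Neural Subspaces record above hinges on two architectural ideas: many small per-segment networks instead of one monolithic light-field network, and weight sharing among nearby networks for parameter efficiency. The PyTorch sketch below illustrates only that decomposition; the layer sizes, ray parameterization, segment routing, and the single shared trunk layer are illustrative assumptions, not the paper's design.

```python
# Many small per-segment networks plus a shared trunk layer, sketching
# the "neural subspaces with weight sharing" decomposition.
import torch
import torch.nn as nn

class SubspaceNets(nn.Module):
    def __init__(self, n_segments=16, in_dim=4, hidden=64):
        super().__init__()
        # One trunk layer shared by all segment networks (a simplification
        # of the paper's adaptive sharing scheme).
        self.shared = nn.Linear(in_dim, hidden)
        # Small per-segment specialist heads.
        self.heads = nn.ModuleList(
            nn.Sequential(nn.ReLU(), nn.Linear(hidden, hidden),
                          nn.ReLU(), nn.Linear(hidden, 3))
            for _ in range(n_segments)
        )

    def forward(self, rays, segment_ids):
        # rays: (N, in_dim) ray coordinates, e.g. a two-plane (u, v, s, t)
        # parameterization; segment_ids: (N,) segment index per ray.
        h = self.shared(rays)
        rgb = torch.empty(rays.shape[0], 3, device=rays.device)
        for s, head in enumerate(self.heads):  # route rays to their head
            mask = segment_ids == s
            if mask.any():
                rgb[mask] = head(h[mask])
        return rgb
```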
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IHMR48xnyM",
"doi": "10.1109/TVCG.2022.3225327",
"abstract": "Visual question answering (VQA) has experienced tremendous progress in recent years. However, most efforts have only focused on 2D image question-answering tasks. In this paper, we extend VQA to its 3D counterpart, 3D question answering (3DQA), which can facilitate a machine's perception of 3D real-world scenarios. Unlike 2D image VQA, 3DQA takes the color point cloud as input and requires both appearance and 3D geometrical comprehension to answer the 3D-related questions. To this end, we propose a novel transformer-based 3DQA framework <bold>“3DQA-TR”</bold>, which consists of two encoders to exploit the appearance and geometry information, respectively. Finally, the multi-modal information about the appearance, geometry, and linguistic question can attend to each other via a 3D-linguistic Bert to predict the target answers. To verify the effectiveness of our proposed 3DQA framework, we further develop the first 3DQA dataset <bold>“ScanQA”</bold>, which builds on the ScanNet dataset and contains over 10 K question-answer pairs for 806 scenes. To the best of our knowledge, ScanQA is the first large-scale dataset with natural-language questions and free-form answers in 3D environments that is <bold>fully human-annotated</bold>. We also use several visualizations and experiments to investigate the astonishing diversity of the collected questions and the significant differences between this task from 2D VQA and 3D captioning. Extensive experiments on this dataset demonstrate the obvious superiority of our proposed 3DQA framework over state-of-the-art VQA frameworks and the effectiveness of our major designs. Our code and dataset will be made publicly available to facilitate research in this direction. The code and data are available at http://shuquanye.com/3DQA\\_website/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual question answering (VQA) has experienced tremendous progress in recent years. However, most efforts have only focused on 2D image question-answering tasks. In this paper, we extend VQA to its 3D counterpart, 3D question answering (3DQA), which can facilitate a machine's perception of 3D real-world scenarios. Unlike 2D image VQA, 3DQA takes the color point cloud as input and requires both appearance and 3D geometrical comprehension to answer the 3D-related questions. To this end, we propose a novel transformer-based 3DQA framework <bold>“3DQA-TR”</bold>, which consists of two encoders to exploit the appearance and geometry information, respectively. Finally, the multi-modal information about the appearance, geometry, and linguistic question can attend to each other via a 3D-linguistic Bert to predict the target answers. To verify the effectiveness of our proposed 3DQA framework, we further develop the first 3DQA dataset <bold>“ScanQA”</bold>, which builds on the ScanNet dataset and contains over 10 K question-answer pairs for 806 scenes. To the best of our knowledge, ScanQA is the first large-scale dataset with natural-language questions and free-form answers in 3D environments that is <bold>fully human-annotated</bold>. We also use several visualizations and experiments to investigate the astonishing diversity of the collected questions and the significant differences between this task from 2D VQA and 3D captioning. Extensive experiments on this dataset demonstrate the obvious superiority of our proposed 3DQA framework over state-of-the-art VQA frameworks and the effectiveness of our major designs. Our code and dataset will be made publicly available to facilitate research in this direction. The code and data are available at http://shuquanye.com/3DQA\\_website/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual question answering (VQA) has experienced tremendous progress in recent years. However, most efforts have only focused on 2D image question-answering tasks. In this paper, we extend VQA to its 3D counterpart, 3D question answering (3DQA), which can facilitate a machine's perception of 3D real-world scenarios. Unlike 2D image VQA, 3DQA takes the color point cloud as input and requires both appearance and 3D geometrical comprehension to answer the 3D-related questions. To this end, we propose a novel transformer-based 3DQA framework “3DQA-TR”, which consists of two encoders to exploit the appearance and geometry information, respectively. Finally, the multi-modal information about the appearance, geometry, and linguistic question can attend to each other via a 3D-linguistic Bert to predict the target answers. To verify the effectiveness of our proposed 3DQA framework, we further develop the first 3DQA dataset “ScanQA”, which builds on the ScanNet dataset and contains over 10 K question-answer pairs for 806 scenes. To the best of our knowledge, ScanQA is the first large-scale dataset with natural-language questions and free-form answers in 3D environments that is fully human-annotated. We also use several visualizations and experiments to investigate the astonishing diversity of the collected questions and the significant differences between this task from 2D VQA and 3D captioning. Extensive experiments on this dataset demonstrate the obvious superiority of our proposed 3DQA framework over state-of-the-art VQA frameworks and the effectiveness of our major designs. Our code and dataset will be made publicly available to facilitate research in this direction. The code and data are available at http://shuquanye.com/3DQA\\_website/.",
"title": "3D Question Answering",
"normalizedTitle": "3D Question Answering",
"fno": "09965773",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Task Analysis",
"Question Answering Information Retrieval",
"Point Cloud Compression",
"Geometry",
"Image Color Analysis",
"Transformers",
"Point Cloud",
"Scene Understanding"
],
"authors": [
{
"givenName": "Shuquan",
"surname": "Ye",
"fullName": "Shuquan Ye",
"affiliation": "City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongdong",
"surname": "Chen",
"fullName": "Dongdong Chen",
"affiliation": "Microsoft Cloud AI",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Songfang",
"surname": "Han",
"fullName": "Songfang Han",
"affiliation": "University of California San Diego, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Liao",
"fullName": "Jing Liao",
"affiliation": "City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2022/8563/0/09859639",
"title": "Multi-Head Attention Fusion Network for Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859639/1G9EIhRN8C4",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859591",
"title": "Question-Driven Graph Fusion Network for Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859591/1G9Ep1BWxIQ",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2022/9774/0/977400a233",
"title": "3DVQA: Visual Question Answering for 3D Environments",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2022/977400a233/1GeCye8PHzO",
"parentPublication": {
"id": "proceedings/crv/2022/9774/0",
"title": "2022 19th Conference on Robots and Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f089",
"title": "Maintaining Reasoning Consistency in Compositional Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f089/1H0MXvE8cjm",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9107",
"title": "ScanQA: 3D Question Answering for Spatial Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9107/1H1k0fH76uc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995473",
"title": "MHKD-MVQA: Multimodal Hierarchical Knowledge Distillation for Medical Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995473/1JC2qFvLZ96",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b084",
"title": "Barlow constrained optimization for Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b084/1L8qm6uqYWQ",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2022/6495/0/649500a224",
"title": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2022/649500a224/1MeoND4bVV6",
"parentPublication": {
"id": "proceedings/sitis/2022/6495/0",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/10102595",
"title": "Event-Oriented Visual Question Answering: The E-VQA Dataset and Benchmark",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/10102595/1MkXSR95oIM",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a756",
"title": "Incorporating 3D Information Into Visual Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a756/1ezRE66VHZS",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09964397",
"articleId": "1IFELlEsIve",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09966829",
"articleId": "1IIYlkz8kkE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IIYk820GKk",
"name": "ttg555501-09965773s1-supp1-3225327.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09965773s1-supp1-3225327.pdf",
"extension": "pdf",
"size": "3.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
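The 3DQA-TR record above describes two encoders (appearance and geometry) whose tokens attend to the question tokens through a transformer before answer prediction. Here is a minimal PyTorch sketch of that layout; the linear stand-in encoders, dimensions, pooling, and classification head are assumptions for illustration, not the published architecture.

```python
# Two modality encoders whose tokens are fused with question tokens by
# a transformer encoder, then pooled into answer logits.
import torch
import torch.nn as nn

class ThreeDQASketch(nn.Module):
    def __init__(self, d=256, n_answers=100):
        super().__init__()
        self.appearance = nn.Linear(3, d)  # stand-in for a point-color encoder
        self.geometry = nn.Linear(3, d)    # stand-in for a point-xyz encoder
        layer = nn.TransformerEncoderLayer(d_model=d, nhead=8, batch_first=True)
        self.fusion = nn.TransformerEncoder(layer, num_layers=2)
        self.answer = nn.Linear(d, n_answers)

    def forward(self, colors, xyz, question_emb):
        # colors, xyz: (B, P, 3) per-point inputs; question_emb: (B, T, d)
        # already-embedded question tokens (a real model would use BERT here).
        tokens = torch.cat(
            [self.appearance(colors), self.geometry(xyz), question_emb], dim=1
        )
        fused = self.fusion(tokens)            # all tokens attend to each other
        return self.answer(fused.mean(dim=1))  # pooled answer logits
```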
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IIYlkz8kkE",
"doi": "10.1109/TVCG.2022.3225554",
"abstract": "DNGs are diverse network graphs with texts and different styles of nodes and edges, including mind maps, modeling graphs, and flowcharts. They are high-level visualizations that are easy for humans to understand but difficult for machines. Inspired by the process of human perception of graphs, we propose a method called GraphDecoder to extract data from raster images. Given a raster image, we extract the content based on a neural network. We built a semantic segmentation network based on U-Net. We increase the attention mechanism module, simplify the network model, and design a specific loss function to improve the model's ability to extract graph data. After this semantic segmentation network, we can extract the data of all nodes and edges. We then combine these data to obtain the topological relationship of the entire DNG. We also provide an interactive interface for users to redesign the DNGs. We verify the effectiveness of our method by evaluations and user studies on datasets collected on the Internet and generated datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "DNGs are diverse network graphs with texts and different styles of nodes and edges, including mind maps, modeling graphs, and flowcharts. They are high-level visualizations that are easy for humans to understand but difficult for machines. Inspired by the process of human perception of graphs, we propose a method called GraphDecoder to extract data from raster images. Given a raster image, we extract the content based on a neural network. We built a semantic segmentation network based on U-Net. We increase the attention mechanism module, simplify the network model, and design a specific loss function to improve the model's ability to extract graph data. After this semantic segmentation network, we can extract the data of all nodes and edges. We then combine these data to obtain the topological relationship of the entire DNG. We also provide an interactive interface for users to redesign the DNGs. We verify the effectiveness of our method by evaluations and user studies on datasets collected on the Internet and generated datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "DNGs are diverse network graphs with texts and different styles of nodes and edges, including mind maps, modeling graphs, and flowcharts. They are high-level visualizations that are easy for humans to understand but difficult for machines. Inspired by the process of human perception of graphs, we propose a method called GraphDecoder to extract data from raster images. Given a raster image, we extract the content based on a neural network. We built a semantic segmentation network based on U-Net. We increase the attention mechanism module, simplify the network model, and design a specific loss function to improve the model's ability to extract graph data. After this semantic segmentation network, we can extract the data of all nodes and edges. We then combine these data to obtain the topological relationship of the entire DNG. We also provide an interactive interface for users to redesign the DNGs. We verify the effectiveness of our method by evaluations and user studies on datasets collected on the Internet and generated datasets.",
"title": "GraphDecoder: Recovering Diverse Network Graphs from Visualization Images via Attention-Aware Learning",
"normalizedTitle": "GraphDecoder: Recovering Diverse Network Graphs from Visualization Images via Attention-Aware Learning",
"fno": "09966829",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Mining",
"Data Visualization",
"Visualization",
"Bars",
"Image Edge Detection",
"Task Analysis",
"Solids",
"Information Visualization",
"Chart Mining",
"Semantic Segmentation",
"Network Graph",
"Attention Mechanism"
],
"authors": [
{
"givenName": "Sicheng",
"surname": "Song",
"fullName": "Sicheng Song",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chenhui",
"surname": "Li",
"fullName": "Chenhui Li",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dong",
"surname": "Li",
"fullName": "Dong Li",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Juntong",
"surname": "Chen",
"fullName": "Juntong Chen",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changbo",
"surname": "Wang",
"fullName": "Changbo Wang",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2010/7846/0/05571244",
"title": "3D Edge Bundling for Geographical Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571244/12OmNqzu6LL",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2016/4229/0/07559622",
"title": "Curve separation for line graphs in scholarly documents",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2016/07559622/12OmNscOUib",
"parentPublication": {
"id": "proceedings/jcdl/2016/4229/0",
"title": "2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/05/mcg2017050018",
"title": "Typology of Uncertainty in Static Geolocated Graphs for Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2017/05/mcg2017050018/13rRUIJuxxZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050810",
"title": "Ambiguity-Free Edge-Bundling for Interactive Graph Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050810/13rRUxASuby",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122364",
"title": "Skeleton-Based Edge Bundling for Graph Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122364/13rRUxjyX3W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbk/2018/9125/0/912500a139",
"title": "Snapshot Visualization of Complex Graphs with Force-Directed Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2018/912500a139/17D45VsBU1x",
"parentPublication": {
"id": "proceedings/icbk/2018/9125/0",
"title": "2018 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09720180",
"title": "VividGraph: Learning to Extract and Redesign Network Graphs from Visualization Images",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09720180/1Befc7QugjS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci*cc/2019/1419/0/09146098",
"title": "Visualizing the Temporal Similarity Between Clusters of Dynamic Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2019/09146098/1lFJdiVGPio",
"parentPublication": {
"id": "proceedings/icci*cc/2019/1419/0",
"title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a248",
"title": "Time-Aligned Edge Plots for Dynamic Graph Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a248/1rSR9vG2u4w",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09965773",
"articleId": "1IHMR48xnyM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09968044",
"articleId": "1IKDeaftuUM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IRiLXfqSru",
"name": "ttg555501-09966829s1-supp1-3225554.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09966829s1-supp1-3225554.mp4",
"extension": "mp4",
"size": "23.7 MB",
"__typename": "WebExtraType"
},
{
"id": "1IRiLNVvzC8",
"name": "ttg555501-09966829s1-supp2-3225554.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09966829s1-supp2-3225554.pdf",
"extension": "pdf",
"size": "4.13 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
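The GraphDecoder record above first segments node and edge pixels, then combines them into the topology of the whole graph. The sketch below shows one plausible reading of that combination step: label connected components in each mask and connect two nodes whenever a slightly dilated edge component touches both. The mask encoding and the dilation heuristic are assumptions, not the paper's algorithm.

```python
# Recover graph topology from binary node/edge masks: label connected
# components, then link two nodes when a grown edge component touches both.
import numpy as np
from scipy import ndimage

def recover_topology(node_mask, edge_mask, dilate=2):
    nodes, n_nodes = ndimage.label(node_mask)
    edges, n_edges = ndimage.label(edge_mask)
    links = set()
    for e in range(1, n_edges + 1):
        # Grow the edge slightly so it overlaps its endpoint nodes.
        grown = ndimage.binary_dilation(edges == e, iterations=dilate)
        touched = np.unique(nodes[grown])
        touched = touched[touched > 0]
        if len(touched) == 2:  # a well-formed edge touches exactly two nodes
            links.add(tuple(sorted(touched.tolist())))
    return n_nodes, sorted(links)

# Usage sketch: node_mask / edge_mask would come from the segmentation
# network's per-class outputs, thresholded to booleans.
```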
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IFELlEsIve",
"doi": "10.1109/TVCG.2022.3225114",
"abstract": "Many Information Retrieval (IR) approaches have been proposed to extract relevant information from a large corpus. Among these methods, phrase-based retrieval methods have been proven to capture more concrete and concise information than word-based and paragraph-based methods. However, due to the complex relationship among phrases and a lack of proper visual guidance, achieving user-driven interactive information-seeking and retrieval remains challenging. In this study, we present a visual analytic approach for users to seek information from an extensive collection of documents efficiently. The main component of our approach is a PhraseMap, where nodes and edges represent the extracted keyphrases and their relationships, respectively, from a large corpus. To build the PhraseMap, we extract keyphrases from each document and link the phrases according to word attention determined using modern language models, i.e., BERT. As can be imagined, the graph is complex due to the extensive volume of information and the massive amount of relationships. Therefore, we develop a navigation algorithm to facilitate information seeking. It includes (1) a question-answering (QA) model to identify phrases related to users' queries and (2) updating relevant phrases based on users' feedback. To better present the PhraseMap, we introduce a resource-controlled self-organizing map (RC-SOM) to evenly and regularly display phrases on grid cells while expecting phrases with similar semantics to stay close in the visualization. To evaluate our approach, we conducted case studies with three domain experts in diverse literature. The results and feedback demonstrate its effectiveness, usability, and intelligence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many Information Retrieval (IR) approaches have been proposed to extract relevant information from a large corpus. Among these methods, phrase-based retrieval methods have been proven to capture more concrete and concise information than word-based and paragraph-based methods. However, due to the complex relationship among phrases and a lack of proper visual guidance, achieving user-driven interactive information-seeking and retrieval remains challenging. In this study, we present a visual analytic approach for users to seek information from an extensive collection of documents efficiently. The main component of our approach is a PhraseMap, where nodes and edges represent the extracted keyphrases and their relationships, respectively, from a large corpus. To build the PhraseMap, we extract keyphrases from each document and link the phrases according to word attention determined using modern language models, i.e., BERT. As can be imagined, the graph is complex due to the extensive volume of information and the massive amount of relationships. Therefore, we develop a navigation algorithm to facilitate information seeking. It includes (1) a question-answering (QA) model to identify phrases related to users' queries and (2) updating relevant phrases based on users' feedback. To better present the PhraseMap, we introduce a resource-controlled self-organizing map (RC-SOM) to evenly and regularly display phrases on grid cells while expecting phrases with similar semantics to stay close in the visualization. To evaluate our approach, we conducted case studies with three domain experts in diverse literature. The results and feedback demonstrate its effectiveness, usability, and intelligence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many Information Retrieval (IR) approaches have been proposed to extract relevant information from a large corpus. Among these methods, phrase-based retrieval methods have been proven to capture more concrete and concise information than word-based and paragraph-based methods. However, due to the complex relationship among phrases and a lack of proper visual guidance, achieving user-driven interactive information-seeking and retrieval remains challenging. In this study, we present a visual analytic approach for users to seek information from an extensive collection of documents efficiently. The main component of our approach is a PhraseMap, where nodes and edges represent the extracted keyphrases and their relationships, respectively, from a large corpus. To build the PhraseMap, we extract keyphrases from each document and link the phrases according to word attention determined using modern language models, i.e., BERT. As can be imagined, the graph is complex due to the extensive volume of information and the massive amount of relationships. Therefore, we develop a navigation algorithm to facilitate information seeking. It includes (1) a question-answering (QA) model to identify phrases related to users' queries and (2) updating relevant phrases based on users' feedback. To better present the PhraseMap, we introduce a resource-controlled self-organizing map (RC-SOM) to evenly and regularly display phrases on grid cells while expecting phrases with similar semantics to stay close in the visualization. To evaluate our approach, we conducted case studies with three domain experts in diverse literature. The results and feedback demonstrate its effectiveness, usability, and intelligence.",
"title": "PhraseMap: Attention-Based Keyphrases Recommendation for Information Seeking",
"normalizedTitle": "PhraseMap: Attention-Based Keyphrases Recommendation for Information Seeking",
"fno": "09964397",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Navigation",
"Bit Error Rate",
"Semantics",
"Computational Modeling",
"Visual Analytics",
"Task Analysis",
"Machine Learning",
"Natural Language Processing",
"Textual Data",
"User In The Loop",
"Visual Analytics"
],
"authors": [
{
"givenName": "Yamei",
"surname": "Tu",
"fullName": "Yamei Tu",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Qiu",
"fullName": "Rui Qiu",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Shuen",
"surname": "Wang",
"fullName": "Yu-Shuen Wang",
"affiliation": "National Chiao Tung University, HsinChu, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Po-Yin",
"surname": "Yen",
"fullName": "Po-Yin Yen",
"affiliation": "Washington University School of Medicine, St. Louis, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/jcdl/2006/354/0/04119126",
"title": "Keyphrase extraction-based query expansion in digital libraries",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2006/04119126/12OmNAle6iq",
"parentPublication": {
"id": "proceedings/jcdl/2006/354/0",
"title": "2006 IEEE/ACM 6th Joint Conference on Digital Libraries",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2008/3496/1/3496a214",
"title": "An Automatic Online News Topic Keyphrase Extraction System",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2008/3496a214/12OmNCcbEgB",
"parentPublication": {
"id": "proceedings/wi-iat/2008/3496/1",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2004/2056/4/205640104c",
"title": "Automating Keyphrase Extraction with Multi-Objective Genetic Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2004/205640104c/12OmNrK9q21",
"parentPublication": {
"id": "proceedings/hicss/2004/2056/4",
"title": "37th Annual Hawaii International Conference on System Sciences, 2004. Proceedings of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07870993",
"title": "Supporting Collaborative Information Seeking in Online Community Engagement",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07870993/12OmNxFsmnV",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/1/3357b061",
"title": "Improved Automatic Keyphrase Extraction by Using Semantic Information",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357b061/12OmNxwWozl",
"parentPublication": {
"id": "proceedings/icicta/2008/3357/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2009/03/mco2009030060",
"title": "Evaluation Challenges and Directions for Information-Seeking Support Systems",
"doi": null,
"abstractUrl": "/magazine/co/2009/03/mco2009030060/13rRUxlgy70",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2009/03/mco2009030047",
"title": "Collaborative Information Seeking",
"doi": null,
"abstractUrl": "/magazine/co/2009/03/mco2009030047/13rRUyekJ0S",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2021/2398/0/239800b529",
"title": "Topic-Attentive Encoder-Decoder with Pre-Trained Language Model for Keyphrase Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2021/239800b529/1Aqxk8Dt7R6",
"parentPublication": {
"id": "proceedings/icdm/2021/2398/0",
"title": "2021 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a077",
"title": "Information-Seeking in Localization and Mission Planning of Multi-Agent Systems",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a077/1GvdeT51bXy",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09939115",
"title": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09939115/1I1KuH1xVF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09961901",
"articleId": "1IxvZ4KZbri",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09965773",
"articleId": "1IHMR48xnyM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IHMRkjXomQ",
"name": "ttg555501-09964397s1-supp1-3225114.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09964397s1-supp1-3225114.pdf",
"extension": "pdf",
"size": "833 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
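The PhraseMap record above links keyphrases "according to word attention determined using modern language models, i.e., BERT." A minimal sketch of one way to turn token-level attention into phrase-level links is shown below; the averaged attention matrix, span encoding, and threshold are illustrative assumptions rather than the paper's exact procedure.

```python
# Score phrase-phrase links by averaging token-to-token attention
# between the two phrases' token spans, keeping links above a threshold.
import numpy as np

def phrase_links(attn, spans, threshold=0.05):
    """attn: (T, T) attention averaged over heads/layers (an assumption);
    spans: {phrase: (start, end)} token ranges for each keyphrase."""
    links = []
    phrases = list(spans)
    for i, p in enumerate(phrases):
        for q in phrases[i + 1:]:
            (a0, a1), (b0, b1) = spans[p], spans[q]
            # Symmetrize: attention from p's tokens to q's, and back.
            w = (attn[a0:a1, b0:b1].mean() + attn[b0:b1, a0:a1].mean()) / 2
            if w >= threshold:
                links.append((p, q, float(w)))
    return links
```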
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IxvZ4KZbri",
"doi": "10.1109/TVCG.2022.3224073",
"abstract": "Several advanced redirected walking techniques have been proposed in recent years to improve natural walking in virtual environments. One active and important research challenge of redirected walking focuses on the alignment of virtual and physical environments by redirection gains. If both environments are aligned, physical objects appear at the same positions as their virtual counterparts. When a user arrives at such a virtual object, she can touch the corresponding physical object providing passive haptic feedback. When multiple transferable virtual or physical target positions exist, the alignment can exploit multiple options, but the process requires more complicated solutions. In this paper, we study the problem of virtual-physical environmental alignment at multiple transferable target positions, and introduce a novel reinforcement learning-based redirected walking method. We design a novel comprehensive reward function that dynamically determines virtual-physical target matching and updates virtual target weights for reward computation. We evaluate our method through various simulated experiments as well as real user tests. The results show that our method obtains less physical distance error for environmental alignment and requires fewer resets than state-of-the-art techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Several advanced redirected walking techniques have been proposed in recent years to improve natural walking in virtual environments. One active and important research challenge of redirected walking focuses on the alignment of virtual and physical environments by redirection gains. If both environments are aligned, physical objects appear at the same positions as their virtual counterparts. When a user arrives at such a virtual object, she can touch the corresponding physical object providing passive haptic feedback. When multiple transferable virtual or physical target positions exist, the alignment can exploit multiple options, but the process requires more complicated solutions. In this paper, we study the problem of virtual-physical environmental alignment at multiple transferable target positions, and introduce a novel reinforcement learning-based redirected walking method. We design a novel comprehensive reward function that dynamically determines virtual-physical target matching and updates virtual target weights for reward computation. We evaluate our method through various simulated experiments as well as real user tests. The results show that our method obtains less physical distance error for environmental alignment and requires fewer resets than state-of-the-art techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Several advanced redirected walking techniques have been proposed in recent years to improve natural walking in virtual environments. One active and important research challenge of redirected walking focuses on the alignment of virtual and physical environments by redirection gains. If both environments are aligned, physical objects appear at the same positions as their virtual counterparts. When a user arrives at such a virtual object, she can touch the corresponding physical object providing passive haptic feedback. When multiple transferable virtual or physical target positions exist, the alignment can exploit multiple options, but the process requires more complicated solutions. In this paper, we study the problem of virtual-physical environmental alignment at multiple transferable target positions, and introduce a novel reinforcement learning-based redirected walking method. We design a novel comprehensive reward function that dynamically determines virtual-physical target matching and updates virtual target weights for reward computation. We evaluate our method through various simulated experiments as well as real user tests. The results show that our method obtains less physical distance error for environmental alignment and requires fewer resets than state-of-the-art techniques.",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"normalizedTitle": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"fno": "09961901",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Haptic Interfaces",
"Solids",
"Virtual Environments",
"Target Tracking",
"Orbits",
"Trajectory",
"Redirected Walking",
"Virtual Physical Environmental Alignment",
"Reinforcement Learning"
],
"authors": [
{
"givenName": "Miao",
"surname": "Wang",
"fullName": "Miao Wang",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ze-Yin",
"surname": "Chen",
"fullName": "Ze-Yin Chen",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wen-Chuan",
"surname": "Cai",
"fullName": "Wen-Chuan Cai",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Steinicke",
"fullName": "Frank Steinicke",
"affiliation": "Department of Informatics, Universität Hamburg, Hamburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010006",
"title": "Using Perceptual Illusions for Redirected Walking",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010006/13rRUB6SpRZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040634",
"title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040634/13rRUx0Pqpx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049730",
"title": "Monte-Carlo Redirected Walking: Gain Selection Through Simulated Walks",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049730/1KYowitu5OM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382909",
"title": "ARC: Alignment-based Redirection Controller for Redirected Walking in Complex Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382909/1saZt58Vwf6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09956758",
"articleId": "1Iu2JIUXLR6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09964397",
"articleId": "1IFELlEsIve",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Iz10b3TLDW",
"name": "ttg555501-09961901s1-supp1-3224073.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09961901s1-supp1-3224073.mp4",
"extension": "mp4",
"size": "91.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
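The record above hinges on a reward that simultaneously scores virtual-physical target matching and penalizes resets. As a minimal illustrative sketch of that idea, and not the authors' implementation: the `alignment_reward` helper below scores a state by the best weighted one-to-one matching between virtual and physical targets; the function name, the weighting scheme, and the assignment-based matching are all hypothetical assumptions.

```python
# Hypothetical sketch of a multi-target alignment reward; NOT the paper's code.
import numpy as np
from scipy.optimize import linear_sum_assignment

def alignment_reward(virtual_targets, physical_targets, weights,
                     reset_occurred, reset_penalty=1.0):
    """Negative cost of the best virtual-physical target matching, minus a reset penalty.

    virtual_targets:  (n, 2) virtual target positions in the user's frame
    physical_targets: (m, 2) physical counterpart positions in the user's frame
    weights:          (n,) per-virtual-target importance, updated by the agent
    """
    # Weighted pairwise distances between every virtual and physical target.
    diff = virtual_targets[:, None, :] - physical_targets[None, :, :]
    cost = np.linalg.norm(diff, axis=-1) * weights[:, None]
    # Dynamic matching: minimum-cost one-to-one assignment (Hungarian algorithm).
    rows, cols = linear_sum_assignment(cost)
    return -cost[rows, cols].sum() - (reset_penalty if reset_occurred else 0.0)
```

An RL agent choosing redirection gains would receive a value like this at each step, so configurations in which every virtual target has a nearby physical counterpart score highest.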
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Iu2J7nECo8",
"doi": "10.1109/TVCG.2022.3223399",
"abstract": "Dimension reduction (DR) is commonly utilized to capture the intrinsic structure and transform high-dimensional data into low-dimensional space while retaining meaningful properties of the original data. It is used in various applications, such as image recognition, single-cell sequencing analysis, and biomarker discovery. However, contemporary parametric-free and parametric DR techniques suffer from several significant shortcomings, such as the inability to preserve global and local features and the pool generalization performance. On the other hand, regarding explainability, it is crucial to comprehend the embedding process, especially the contribution of each part to the embedding process, while understanding how each feature affects the embedding results that identify critical components and help diagnose the embedding process. To address these problems, we have developed a deep neural network method called EVNet, which provides not only excellent performance in structural maintainability but also explainability to the DR therein. EVNet starts with data augmentation and a manifold-based loss function to improve embedding performance. The explanation is based on saliency maps and aims to examine the trained EVNet parameters and contributions of components during the embedding process. The proposed techniques are integrated with a visual interface to help the user to adjust EVNet to achieve better DR performance and explainability. The interactive visual interface makes it easier to illustrate the data features, compare different DR techniques, and investigate DR. An in-depth experimental comparison shows that EVNet consistently outperforms the state-of-the-art methods in both performance measures and explainability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dimension reduction (DR) is commonly utilized to capture the intrinsic structure and transform high-dimensional data into low-dimensional space while retaining meaningful properties of the original data. It is used in various applications, such as image recognition, single-cell sequencing analysis, and biomarker discovery. However, contemporary parametric-free and parametric DR techniques suffer from several significant shortcomings, such as the inability to preserve global and local features and the pool generalization performance. On the other hand, regarding explainability, it is crucial to comprehend the embedding process, especially the contribution of each part to the embedding process, while understanding how each feature affects the embedding results that identify critical components and help diagnose the embedding process. To address these problems, we have developed a deep neural network method called EVNet, which provides not only excellent performance in structural maintainability but also explainability to the DR therein. EVNet starts with data augmentation and a manifold-based loss function to improve embedding performance. The explanation is based on saliency maps and aims to examine the trained EVNet parameters and contributions of components during the embedding process. The proposed techniques are integrated with a visual interface to help the user to adjust EVNet to achieve better DR performance and explainability. The interactive visual interface makes it easier to illustrate the data features, compare different DR techniques, and investigate DR. An in-depth experimental comparison shows that EVNet consistently outperforms the state-of-the-art methods in both performance measures and explainability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dimension reduction (DR) is commonly utilized to capture the intrinsic structure and transform high-dimensional data into low-dimensional space while retaining meaningful properties of the original data. It is used in various applications, such as image recognition, single-cell sequencing analysis, and biomarker discovery. However, contemporary parametric-free and parametric DR techniques suffer from several significant shortcomings, such as the inability to preserve global and local features and the pool generalization performance. On the other hand, regarding explainability, it is crucial to comprehend the embedding process, especially the contribution of each part to the embedding process, while understanding how each feature affects the embedding results that identify critical components and help diagnose the embedding process. To address these problems, we have developed a deep neural network method called EVNet, which provides not only excellent performance in structural maintainability but also explainability to the DR therein. EVNet starts with data augmentation and a manifold-based loss function to improve embedding performance. The explanation is based on saliency maps and aims to examine the trained EVNet parameters and contributions of components during the embedding process. The proposed techniques are integrated with a visual interface to help the user to adjust EVNet to achieve better DR performance and explainability. The interactive visual interface makes it easier to illustrate the data features, compare different DR techniques, and investigate DR. An in-depth experimental comparison shows that EVNet consistently outperforms the state-of-the-art methods in both performance measures and explainability.",
"title": "EVNet: An Explainable Deep Network for Dimension Reduction",
"normalizedTitle": "EVNet: An Explainable Deep Network for Dimension Reduction",
"fno": "09956753",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Data Models",
"Parametric Statistics",
"Manifolds",
"Biological System Modeling",
"Sun",
"Predictive Models",
"Dimension Reduction",
"Explainability Of DR Models",
"Deep Learning",
"Parametric Model"
],
"authors": [
{
"givenName": "Zelin",
"surname": "Zang",
"fullName": "Zelin Zang",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shenghui",
"surname": "Cheng",
"fullName": "Shenghui Cheng",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Linyan",
"surname": "Lu",
"fullName": "Linyan Lu",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanchen",
"surname": "Xia",
"fullName": "Hanchen Xia",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liangyu",
"surname": "Li",
"fullName": "Liangyu Li",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yaoting",
"surname": "Sun",
"fullName": "Yaoting Sun",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yongjie",
"surname": "Xu",
"fullName": "Yongjie Xu",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Shang",
"fullName": "Lei Shang",
"affiliation": "Alibaba Group, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baigui",
"surname": "Sun",
"fullName": "Baigui Sun",
"affiliation": "Alibaba Group, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stan Z.",
"surname": "Li",
"fullName": "Stan Z. Li",
"affiliation": "AI Division, School of Engineering, Westlake University, Hangzhou, Zhejiang Province, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130412",
"title": "A invertible dimension reduction of curves on a manifold",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130412/12OmNAFFdEL",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/1992/2900/0/0267783",
"title": "Maximizing non-linear concave functions in fixed dimension",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1992/0267783/12OmNxw5BlJ",
"parentPublication": {
"id": "proceedings/focs/1992/2900/0",
"title": "Proceedings., 33rd Annual Symposium on Foundations of Computer Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-nier/2018/5662/0/566201a053",
"title": "Explainable Software Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/icse-nier/2018/566201a053/13bd1h03qO8",
"parentPublication": {
"id": "proceedings/icse-nier/2018/5662/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: New Ideas and Emerging Technologies Results (ICSE-NIER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122634",
"title": "Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122634/13rRUEgs2BW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2022/7177/0/717700b262",
"title": "Explainable Deep Learning Methodologies for Biomedical Images Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2022/717700b262/1HriVQprIEE",
"parentPublication": {
"id": "proceedings/icdcs/2022/7177/0",
"title": "2022 IEEE 42nd International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a075",
"title": "Parametric Dimension Reduction by Preserving Local Structure",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a075/1J6henXuhws",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2021/0132/0/013200a138",
"title": "Explainable Deep Learning for Readmission Prediction with Tree-GloVe Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2021/013200a138/1xIOPvI9QMU",
"parentPublication": {
"id": "proceedings/ichi/2021/0132/0",
"title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552226",
"title": "Revisiting Dimensionality Reduction Techniques for Visual Cluster Analysis: An Empirical Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552226/1xicaXrIayI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09585419",
"title": "Deep Recursive Embedding for High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09585419/1y11cQpf9nO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900d227",
"title": "Explainable Deep Classification Models for Domain Generalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900d227/1yXsUbOxd8A",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09950620",
"articleId": "1Ik4IPEtvu8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956758",
"articleId": "1Iu2JIUXLR6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IIYkY01ucE",
"name": "ttg555501-09956753s1-supp1-3223399.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09956753s1-supp1-3223399.mp4",
"extension": "mp4",
"size": "102 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
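The EVNet record above describes a saliency-map explanation of a trained parametric DR network. Below is a minimal sketch of that generic idea, assuming an arbitrary `torch` module `net` that maps `d` input features to 2-D coordinates; the module, shapes, and gradient loop are illustrative assumptions, not the paper's architecture.

```python
# Hypothetical saliency sketch for a parametric embedding network.
import torch

def embedding_saliency(net, x):
    """Per-feature saliency for one sample x of shape (d,): gradient magnitude
    of the 2-D embedding with respect to each input feature."""
    x = x.clone().detach().requires_grad_(True)
    z = net(x)                      # assumed to return a tensor of shape (2,)
    sal = torch.zeros_like(x)
    for k in range(z.numel()):      # accumulate squared grads of each output coord
        (g,) = torch.autograd.grad(z[k], x, retain_graph=True)
        sal = sal + g ** 2
    return sal.sqrt()               # large values = features driving the layout
```

Averaging such maps over a dataset yields the kind of "which features shape the embedding" view that a visual interface like the one described could present.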
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Iu2JIUXLR6",
"doi": "10.1109/TVCG.2022.3223529",
"abstract": "Multivariate or multidimensional visualization plays an essential role in exploratory data analysis by allowing users to derive insights and formulate hypotheses. Despite their popularity, it is usually users' responsibility to (visually) discover the data patterns, which can be cumbersome and time-consuming. Visual Analytics (VA) and machine learning techniques can be instrumental in mitigating this problem by automatically discovering and representing such patterns. One example is the integration of classification models with (visual) interpretability strategies, where models are used as surrogates for data patterns so that understanding a model enables understanding the phenomenon represented by the data. Although useful and inspiring, the few proposed solutions are based on visual representations of so-called black-box models, so the interpretation of the patterns captured by the models is not straightforward, requiring mechanisms to transform them into human-understandable pieces of information. This paper presents <italic>multiVariate dAta eXplanation (VAX)</italic>, a new VA method to support identifying and visual interpreting patterns in multivariate datasets. Unlike the existing similar approaches, VAX uses the concept of Jumping Emerging Patterns, inherent interpretable logic statements representing class-variable relationships (patterns) derived from random Decision Trees. The potential of VAX is shown through use cases employing two real-world datasets covering different scenarios where intricate patterns are discovered and represented, something challenging to be done using usual exploratory approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Multivariate or multidimensional visualization plays an essential role in exploratory data analysis by allowing users to derive insights and formulate hypotheses. Despite their popularity, it is usually users' responsibility to (visually) discover the data patterns, which can be cumbersome and time-consuming. Visual Analytics (VA) and machine learning techniques can be instrumental in mitigating this problem by automatically discovering and representing such patterns. One example is the integration of classification models with (visual) interpretability strategies, where models are used as surrogates for data patterns so that understanding a model enables understanding the phenomenon represented by the data. Although useful and inspiring, the few proposed solutions are based on visual representations of so-called black-box models, so the interpretation of the patterns captured by the models is not straightforward, requiring mechanisms to transform them into human-understandable pieces of information. This paper presents <italic>multiVariate dAta eXplanation (VAX)</italic>, a new VA method to support identifying and visual interpreting patterns in multivariate datasets. Unlike the existing similar approaches, VAX uses the concept of Jumping Emerging Patterns, inherent interpretable logic statements representing class-variable relationships (patterns) derived from random Decision Trees. The potential of VAX is shown through use cases employing two real-world datasets covering different scenarios where intricate patterns are discovered and represented, something challenging to be done using usual exploratory approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Multivariate or multidimensional visualization plays an essential role in exploratory data analysis by allowing users to derive insights and formulate hypotheses. Despite their popularity, it is usually users' responsibility to (visually) discover the data patterns, which can be cumbersome and time-consuming. Visual Analytics (VA) and machine learning techniques can be instrumental in mitigating this problem by automatically discovering and representing such patterns. One example is the integration of classification models with (visual) interpretability strategies, where models are used as surrogates for data patterns so that understanding a model enables understanding the phenomenon represented by the data. Although useful and inspiring, the few proposed solutions are based on visual representations of so-called black-box models, so the interpretation of the patterns captured by the models is not straightforward, requiring mechanisms to transform them into human-understandable pieces of information. This paper presents multiVariate dAta eXplanation (VAX), a new VA method to support identifying and visual interpreting patterns in multivariate datasets. Unlike the existing similar approaches, VAX uses the concept of Jumping Emerging Patterns, inherent interpretable logic statements representing class-variable relationships (patterns) derived from random Decision Trees. The potential of VAX is shown through use cases employing two real-world datasets covering different scenarios where intricate patterns are discovered and represented, something challenging to be done using usual exploratory approaches.",
"title": "Multivariate Data Explanation by Jumping Emerging Patterns Visualization",
"normalizedTitle": "Multivariate Data Explanation by Jumping Emerging Patterns Visualization",
"fno": "09956758",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Data Models",
"Analytical Models",
"Layout",
"Data Mining",
"Predictive Models",
"Histograms",
"Data Explanation",
"Jumping Emerging Patterns",
"Random Decision Trees",
"Exploratory Analysis"
],
"authors": [
{
"givenName": "Mário Popolin",
"surname": "Neto",
"fullName": "Mário Popolin Neto",
"affiliation": "Federal Institute of São Paulo (IFSP), University of São Paulo (USP), Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fernando V.",
"surname": "Paulovich",
"fullName": "Fernando V. Paulovich",
"affiliation": "Eindhoven University of Technology (TU/e), the Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2007/2900/0/29000277",
"title": "Working with patterns in large multivariate datasets - Karnaugh-Veitch-Maps revisited",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000277/12OmNBSjJ0F",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a107",
"title": "Multidimensional Projections to Explore Time-Varying Multivariate Volume Data",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a107/12OmNrkT7Pm",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400536",
"title": "The spatiotemporal multivariate hypercube for discovery of patterns in event data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400536/12OmNvnOwsG",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2017/2686/0/08109149",
"title": "Change Frequency Heatmaps for Temporal Multivariate Phenological Data Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2017/08109149/12OmNxjjEgf",
"parentPublication": {
"id": "proceedings/e-science/2017/2686/0",
"title": "2017 IEEE 13th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2006/2602/0/26020025",
"title": "\"GeoAnalytics\" - Exploring spatio-temporal and multivariate data",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2006/26020025/12OmNzd7bl4",
"parentPublication": {
"id": "proceedings/iv/2006/2602/0",
"title": "Tenth International Conference on Information Visualisation (IV'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2013/5108/0/5108a201",
"title": "Extraction of Interpretable Multivariate Patterns for Early Diagnostics",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2013/5108a201/12OmNzlD95g",
"parentPublication": {
"id": "proceedings/icdm/2013/5108/0",
"title": "2013 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1461",
"title": "A Visualization System for Space-Time and Multivariate Patterns (VIS-STAMP)",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1461/13rRUwgQpDk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192673",
"title": "Temporal MDS Plots for Analysis of Multivariate Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192673/13rRUx0gefm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2008/2935/0/04677368",
"title": "Multivariate visual explanation for high dimensional datasets",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2008/04677368/1oFGMltTtFC",
"parentPublication": {
"id": "proceedings/vast/2008/2935/0",
"title": "2008 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378380",
"title": "Combining Global and Sequential Patterns for Multivariate Time Series Forecasting",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378380/1s64SdG49Hi",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09956753",
"articleId": "1Iu2J7nECo8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09961901",
"articleId": "1IxvZ4KZbri",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IxvYS0skbC",
"name": "ttg555501-09956758s1-supp1-3223529.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09956758s1-supp1-3223529.pdf",
"extension": "pdf",
"size": "10.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
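Jumping Emerging Patterns, as used by VAX above, are logic statements that hold for exactly one class. As a rough sketch of how such candidates can fall out of random Decision Trees, here is a scikit-learn version in which pure root-to-leaf paths become candidate patterns; the feature subsampling scheme, rule format, and all names are assumptions for illustration, not the paper's procedure.

```python
# Hypothetical JEP mining from random decision trees; NOT the VAX implementation.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

def candidate_jeps(X, y, n_trees=20, seed=0):
    rng = np.random.default_rng(seed)
    rules = []
    for _ in range(n_trees):
        # Randomize each tree by training on a random subset of the features.
        cols = rng.choice(X.shape[1], size=max(2, X.shape[1] // 2), replace=False)
        tree = DecisionTreeClassifier(random_state=int(rng.integers(1 << 30)))
        tree.fit(X[:, cols], y)
        t = tree.tree_
        def walk(node, path):
            if t.children_left[node] == -1:           # leaf node
                counts = t.value[node][0]
                if np.count_nonzero(counts) == 1:     # pure leaf -> jumping pattern
                    rules.append((tuple(path), int(np.argmax(counts))))
                return
            f, thr = cols[t.feature[node]], t.threshold[node]
            walk(t.children_left[node],  path + [(f, "<=", thr)])
            walk(t.children_right[node], path + [(f, ">",  thr)])
        walk(0, [])
    return rules   # list of (threshold conditions, class) candidates
```

Each returned rule is a conjunction of threshold conditions satisfied only by samples of a single class, which is what makes the pattern "jumping" and directly human-readable.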
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Ik4IPEtvu8",
"doi": "10.1109/TVCG.2022.3222186",
"abstract": "Due to their great performance in many challenges, Deep Learning (DL) techniques keep gaining popularity in many fields. They have been adapted to process graph data structures to solve various complicated tasks such as graph classification and edge prediction. Eventually, they reached the Graph Drawing (GD) task. This paper is an extended version of the previously published <inline-formula><tex-math notation=\"LaTeX\">Z_$(DNN)^{2}$_Z</tex-math></inline-formula> and presents a framework to leverage DL techniques for graph drawing (DL4GD). We demonstrate how it is possible to train a Deep Learning model to extract features from a graph and project them into a graph layout. The method proposes to leverage efficient Convolutional Neural Networks, adapting them to graphs using Graph Convolutions. The graph layout projection is learned by optimizing a cost function that does not require any ground truth layout, as opposed to prior work. This paper also proposes an implementation and benchmark of the framework to study its sensitivity to certain deep learning-related conditions. As the field is novel, and many questions remain to be answered, we do not focus on finding the most optimal implementation of the method, but rather contribute toward a better understanding of the approach potential. More precisely, we study different learning strategies relative to the models training datasets. Finally, we discuss the main advantages and limitations of DL4GD.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Due to their great performance in many challenges, Deep Learning (DL) techniques keep gaining popularity in many fields. They have been adapted to process graph data structures to solve various complicated tasks such as graph classification and edge prediction. Eventually, they reached the Graph Drawing (GD) task. This paper is an extended version of the previously published <inline-formula><tex-math notation=\"LaTeX\">$(DNN)^{2}$</tex-math></inline-formula> and presents a framework to leverage DL techniques for graph drawing (DL4GD). We demonstrate how it is possible to train a Deep Learning model to extract features from a graph and project them into a graph layout. The method proposes to leverage efficient Convolutional Neural Networks, adapting them to graphs using Graph Convolutions. The graph layout projection is learned by optimizing a cost function that does not require any ground truth layout, as opposed to prior work. This paper also proposes an implementation and benchmark of the framework to study its sensitivity to certain deep learning-related conditions. As the field is novel, and many questions remain to be answered, we do not focus on finding the most optimal implementation of the method, but rather contribute toward a better understanding of the approach potential. More precisely, we study different learning strategies relative to the models training datasets. Finally, we discuss the main advantages and limitations of DL4GD.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Due to their great performance in many challenges, Deep Learning (DL) techniques keep gaining popularity in many fields. They have been adapted to process graph data structures to solve various complicated tasks such as graph classification and edge prediction. Eventually, they reached the Graph Drawing (GD) task. This paper is an extended version of the previously published - and presents a framework to leverage DL techniques for graph drawing (DL4GD). We demonstrate how it is possible to train a Deep Learning model to extract features from a graph and project them into a graph layout. The method proposes to leverage efficient Convolutional Neural Networks, adapting them to graphs using Graph Convolutions. The graph layout projection is learned by optimizing a cost function that does not require any ground truth layout, as opposed to prior work. This paper also proposes an implementation and benchmark of the framework to study its sensitivity to certain deep learning-related conditions. As the field is novel, and many questions remain to be answered, we do not focus on finding the most optimal implementation of the method, but rather contribute toward a better understanding of the approach potential. More precisely, we study different learning strategies relative to the models training datasets. Finally, we discuss the main advantages and limitations of DL4GD.",
"title": "Toward Efficient Deep Learning for Graph Drawing (DL4GD)",
"normalizedTitle": "Toward Efficient Deep Learning for Graph Drawing (DL4GD)",
"fno": "09950620",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Graph Drawing",
"Deep Learning",
"Task Analysis",
"Training",
"Measurement",
"Computational Modeling",
"Deep Learning",
"Graph Convolution",
"Graph Drawing",
"Graph Neural Network"
],
"authors": [
{
"givenName": "Loann",
"surname": "Giovannangeli",
"fullName": "Loann Giovannangeli",
"affiliation": "Univ. Bordeaux, CNRS, Bordeaux INP, INRIA, LaBRI, Talence, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frederic",
"surname": "Lalanne",
"fullName": "Frederic Lalanne",
"affiliation": "Univ. Bordeaux, CNRS, Bordeaux INP, INRIA, LaBRI, Talence, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Auber",
"fullName": "David Auber",
"affiliation": "Univ. Bordeaux, CNRS, Bordeaux INP, INRIA, LaBRI, Talence, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Romain",
"surname": "Giot",
"fullName": "Romain Giot",
"affiliation": "Univ. Bordeaux, CNRS, Bordeaux INP, INRIA, LaBRI, Talence, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Romain",
"surname": "Bourqui",
"fullName": "Romain Bourqui",
"affiliation": "Univ. Bordeaux, CNRS, Bordeaux INP, INRIA, LaBRI, Talence, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vl/1998/8712/0/87120032",
"title": "A Graph Rewriting Programming Language for Graph Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1998/87120032/12OmNB7LvGY",
"parentPublication": {
"id": "proceedings/vl/1998/8712/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apvis/2007/0808/0/04126224",
"title": "Analysis of a high-dimensional approach to interactive graph drawing",
"doi": null,
"abstractUrl": "/proceedings-article/apvis/2007/04126224/12OmNwcCILb",
"parentPublication": {
"id": "proceedings/apvis/2007/0808/0",
"title": "Asia-Pacific Symposium on Visualisation 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2004/8779/0/87790191",
"title": "Dynamic Drawing of Clustered Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2004/87790191/12OmNyugyVo",
"parentPublication": {
"id": "proceedings/ieee-infovis/2004/8779/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/1995/08/e0662",
"title": "Parametric Graph Drawing",
"doi": null,
"abstractUrl": "/journal/ts/1995/08/e0662/13rRUNvya2D",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1998/11/t1297",
"title": "Interactive Orthogonal Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tc/1998/11/t1297/13rRUwfZBZo",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040727",
"title": "Online Dynamic Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040727/13rRUxBJhvo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/05963661",
"title": "Coherent Time-Varying Graph Drawing with Multifocus+Context Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/05963661/13rRUxYIMUX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09723546",
"title": "Multicriteria Scalable Graph Drawing via Stochastic Gradient Descent, <inline-formula><tex-math notation=\"LaTeX\">Z_$(SGD)^{2}$_Z</tex-math></inline-formula>",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09723546/1BocJwdaFYk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807275",
"title": "<italic>DeepDrawing</italic>: A Deep Learning Approach to Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807275/1cG6703GLja",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09476996",
"title": "DeepGD: A Deep Learning Framework for Graph Drawing Using GNN",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09476996/1v2MgNY5cuk",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09947006",
"articleId": "1Idr5neUL5e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956753",
"articleId": "1Iu2J7nECo8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
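The DL4GD record's key point is a layout cost that needs no ground-truth drawing. A classic cost with that property is graph-theoretic stress; the sketch below is a generic formulation under assumed shapes, not necessarily the paper's cost. It is differentiable, so a graph-convolutional model producing `pos` can be trained against it directly.

```python
# Hypothetical ground-truth-free layout loss: stress between layout distances
# and shortest-path distances. Not necessarily the cost used in the paper.
import torch

def stress_loss(pos, d):
    """pos: (n, 2) predicted coordinates; d: (n, n) shortest-path distances."""
    diff = pos[:, None, :] - pos[None, :, :]
    eu = (diff.pow(2).sum(-1) + 1e-9).sqrt()       # eps keeps grads finite at i == j
    mask = ~torch.eye(len(pos), dtype=torch.bool)  # ignore self-pairs
    w = 1.0 / d[mask] ** 2                         # standard stress weighting
    return (w * (eu[mask] - d[mask]) ** 2).sum()
```

Minimizing this pulls the Euclidean distances of the learned layout toward the graph distances, which is why no reference drawing is ever required.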
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Idr5neUL5e",
"doi": "10.1109/TVCG.2022.3219451",
"abstract": "3D scene graph generation (SGG) has been of high interest in computer vision. Although the accuracy of 3D SGG on coarse classification and single relation label has been gradually improved, the performance of existing works is still far from being perfect for fine-grained and multi-label situations. In this paper, we propose a framework fully exploring contextual information for the 3D SGG task, which attempts to satisfy the requirements of fine-grained entity class, multiple relation labels, and high accuracy simultaneously. Our proposed approach is composed of a Graph Feature Extraction module and a Graph Contextual Reasoning module, achieving appropriate information-redundancy feature extraction, structured organization, and hierarchical inferring. Our approach achieves superior or competitive performance over previous methods on the 3DSSG dataset, especially on the relationship prediction sub-task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D scene graph generation (SGG) has been of high interest in computer vision. Although the accuracy of 3D SGG on coarse classification and single relation label has been gradually improved, the performance of existing works is still far from being perfect for fine-grained and multi-label situations. In this paper, we propose a framework fully exploring contextual information for the 3D SGG task, which attempts to satisfy the requirements of fine-grained entity class, multiple relation labels, and high accuracy simultaneously. Our proposed approach is composed of a Graph Feature Extraction module and a Graph Contextual Reasoning module, achieving appropriate information-redundancy feature extraction, structured organization, and hierarchical inferring. Our approach achieves superior or competitive performance over previous methods on the 3DSSG dataset, especially on the relationship prediction sub-task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D scene graph generation (SGG) has been of high interest in computer vision. Although the accuracy of 3D SGG on coarse classification and single relation label has been gradually improved, the performance of existing works is still far from being perfect for fine-grained and multi-label situations. In this paper, we propose a framework fully exploring contextual information for the 3D SGG task, which attempts to satisfy the requirements of fine-grained entity class, multiple relation labels, and high accuracy simultaneously. Our proposed approach is composed of a Graph Feature Extraction module and a Graph Contextual Reasoning module, achieving appropriate information-redundancy feature extraction, structured organization, and hierarchical inferring. Our approach achieves superior or competitive performance over previous methods on the 3DSSG dataset, especially on the relationship prediction sub-task.",
"title": "Explore Contextual Information for 3D Scene Graph Generation",
"normalizedTitle": "Explore Contextual Information for 3D Scene Graph Generation",
"fno": "09947006",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Feature Extraction",
"Three Dimensional Displays",
"Task Analysis",
"Visualization",
"Skeleton",
"Cognition",
"Message Passing",
"Scene Understanding",
"Context Exploration",
"Graph Skeleton",
"Scene Graph Generation"
],
"authors": [
{
"givenName": "Yuanyuan",
"surname": "Liu",
"fullName": "Yuanyuan Liu",
"affiliation": "Department of Electronic Information and Electrical Engineering, Dalian University of Technology, Dalian, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chengjiang",
"surname": "Long",
"fullName": "Chengjiang Long",
"affiliation": "currently a Research Scientist, Meta Reality Labs, Burlingame, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhaoxuan",
"surname": "Zhang",
"fullName": "Zhaoxuan Zhang",
"affiliation": "Department of Electronic Information and Electrical Engineering, Dalian University of Technology, Dalian, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bokai",
"surname": "Liu",
"fullName": "Bokai Liu",
"affiliation": "Department of Electronic Information and Electrical Engineering, Dalian University of Technology, Dalian, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qiang",
"surname": "Zhang",
"fullName": "Qiang Zhang",
"affiliation": "Key Lab of Advanced Design and Intelligent Computing, (Dalian University), Ministry of Education, Dalian, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baocai",
"surname": "Yin",
"fullName": "Baocai Yin",
"affiliation": "Department of Electronic Information and Electrical Engineering, Dalian University of Technology, Dalian, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Yang",
"fullName": "Xin Yang",
"affiliation": "Department of Electronic Information and Electrical Engineering, Dalian University of Technology, Dalian, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457d097",
"title": "Scene Graph Generation by Iterative Message Passing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d097/12OmNBAqZH0",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859970",
"title": "Multi-Scale Graph Attention Network for Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859970/1G9EpEewD16",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859944",
"title": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859944/1G9EuqL6nzG",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5575",
"title": "Not All Relations are Equal: Mining Informative Labels for Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5575/1H1iflRfdEA",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9454",
"title": "HL-Net: Heterophily Learning Network for Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956712",
"title": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956712/1IHpXOouE7u",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d743",
"title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d743/1m3o2oONVsY",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d713",
"title": "Unbiased Scene Graph Generation From Biased Training",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428472",
"title": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428472/1uilSfRdZcs",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j700",
"title": "Exploiting Edge-Oriented Reasoning for 3D Point-based Scene Graph Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j700/1yeMcyroL2o",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09944974",
"articleId": "1IbMbZhmPE4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09950620",
"articleId": "1Ik4IPEtvu8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IiLI178vAY",
"name": "ttg555501-09947006s1-supp1-3219451.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09947006s1-supp1-3219451.pdf",
"extension": "pdf",
"size": "24.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
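The scene-graph record's Graph Feature Extraction and Graph Contextual Reasoning modules boil down to passing context between entity (node) and predicate (edge) features. Below is one generic round of such message passing, with hypothetical layer choices and dimensions; it sketches the family of methods, not the paper's network.

```python
# Hypothetical node/edge context-passing round for scene graph generation.
import torch
import torch.nn as nn

class ContextRound(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.edge_update = nn.Linear(3 * dim, dim)  # subject + edge + object -> edge
        self.node_update = nn.GRUCell(dim, dim)     # fold incoming messages into nodes

    def forward(self, node_feat, edge_feat, edge_index):
        src, dst = edge_index                       # (2, E) subject / object indices
        # Refresh each edge from its endpoints' current features.
        e = torch.relu(self.edge_update(
            torch.cat([node_feat[src], edge_feat, node_feat[dst]], dim=-1)))
        # Mean-aggregate incoming edge messages per object node.
        msg = torch.zeros_like(node_feat).index_add_(0, dst, e)
        deg = torch.zeros(len(node_feat), 1).index_add_(
            0, dst, torch.ones(len(dst), 1)).clamp(min=1)
        return self.node_update(msg / deg, node_feat), e
```

Stacking a few such rounds lets fine-grained entity and multi-label relation heads read context-enriched features, which is the general mechanism the abstract invokes.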
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IbMbZhmPE4",
"doi": "10.1109/TVCG.2022.3221014",
"abstract": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.",
"title": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams",
"normalizedTitle": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams",
"fno": "09944974",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Layout",
"Image Color Analysis",
"Standards",
"Psychology",
"Color",
"Information Visualization",
"Node Link Diagrams",
"Homophily",
"Perception"
],
"authors": [
{
"givenName": "Daniel",
"surname": "Reimann",
"fullName": "Daniel Reimann",
"affiliation": "Department of Psychology, FernUniversität in Hagen, Hagen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "André",
"surname": "Schulz",
"fullName": "André Schulz",
"affiliation": "Department of Mathematics and Computer Science, FernUniversität in Hagen, Hagen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nilam",
"surname": "Ram",
"fullName": "Nilam Ram",
"affiliation": "Departments of Psychology and Communication, Stanford University, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Gaschler",
"fullName": "Robert Gaschler",
"affiliation": "Department of Psychology, FernUniversität in Hagen, Hagen, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-7",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2014/4103/0/4103a034",
"title": "Interactive Similarity Links in Treemap Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a034/12OmNAnMuLr",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2014/4103/0/4103a053",
"title": "Partial Link Drawings for Nodes, Links, and Regions of Interest",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a053/12OmNqAU6pE",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iai/2004/8387/0/01300947",
"title": "Using inverse image frequency for perception-based color image quantization",
"doi": null,
"abstractUrl": "/proceedings-article/iai/2004/01300947/12OmNyaoDF1",
"parentPublication": {
"id": "proceedings/iai/2004/8387/0",
"title": "2004 Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875960",
"title": "Reinforcing Visual Grouping Cues to Communicate Complex Informational Structure",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875960/13rRUwInvB8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876036",
"title": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876036/13rRUxZ0o1D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440853",
"title": "Optimizing Color Assignment for Perception of Class Separability in Multiclass Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440853/17D45VTRoxJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797714",
"title": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797714/1cJ0L8WggAE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933760",
"title": "Evaluating Gradient Perception in Color-Coded Scalar Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933760/1fTgHHw1pSM",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09523761",
"title": "Evaluating Effects of Background Stories on Graph Perception",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09523761/1wnLgUKA2fm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccea/2021/2616/0/261600a224",
"title": "Thematic Map Color Matching Design Based On Geese Swarm Optimization Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccea/2021/261600a224/1y4owqkadBC",
"parentPublication": {
"id": "proceedings/iccea/2021/2616/0",
"title": "2021 International Conference on Computer Engineering and Application (ICCEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09944192",
"articleId": "1Ia7fREeHHW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09947006",
"articleId": "1Idr5neUL5e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
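The homophily judgment this record's abstract describes comes down to estimating the relative number of intra-cluster versus inter-cluster links. A toy sketch of that quantity (the function name and edge data are illustrative, not from the paper):

```python
def intra_cluster_share(edges, cluster_of):
    """Fraction of links whose endpoints share a cluster (homophily proxy)."""
    if not edges:
        return 0.0
    intra = sum(1 for u, v in edges if cluster_of[u] == cluster_of[v])
    return intra / len(edges)

edges = [("a", "b"), ("a", "c"), ("c", "d"), ("b", "d")]
cluster_of = {"a": 0, "b": 0, "c": 1, "d": 1}
print(intra_cluster_share(edges, cluster_of))  # 0.5: two intra-, two inter-cluster links
```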
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I8NUS7Puc8",
"doi": "10.1109/TVCG.2022.3220575",
"abstract": "The field of smooth vector graphics explores the representation, creation, rasterization, and automatic generation of light-weight image representations, frequently used for scalable image content. Over the past decades, several conceptual approaches on the representation of images with smooth gradients have emerged that each led to separate research threads, including the popular gradient meshes and diffusion curves. As the computational models matured, the mathematical descriptions diverged and papers started to focus more narrowly on subproblems, such as on the representation and creation of vector graphics, or the automatic vectorization from raster images. Most of the work concentrated on a specific mathematical model only. With this survey, we describe the established computational models in a consistent notation to spur further knowledge transfer, leveraging the recent advances in each field. We therefore categorize vector graphics papers from the last decades based on their underlying mathematical representations as well as on their contribution to the vector graphics content creation pipeline, comprising representation, creation, rasterization, and automatic image vectorization. This survey is meant as an entry point for both artists and researchers. We conclude this survey with an outlook on promising research directions and challenges to overcome in the future.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The field of smooth vector graphics explores the representation, creation, rasterization, and automatic generation of light-weight image representations, frequently used for scalable image content. Over the past decades, several conceptual approaches on the representation of images with smooth gradients have emerged that each led to separate research threads, including the popular gradient meshes and diffusion curves. As the computational models matured, the mathematical descriptions diverged and papers started to focus more narrowly on subproblems, such as on the representation and creation of vector graphics, or the automatic vectorization from raster images. Most of the work concentrated on a specific mathematical model only. With this survey, we describe the established computational models in a consistent notation to spur further knowledge transfer, leveraging the recent advances in each field. We therefore categorize vector graphics papers from the last decades based on their underlying mathematical representations as well as on their contribution to the vector graphics content creation pipeline, comprising representation, creation, rasterization, and automatic image vectorization. This survey is meant as an entry point for both artists and researchers. We conclude this survey with an outlook on promising research directions and challenges to overcome in the future.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The field of smooth vector graphics explores the representation, creation, rasterization, and automatic generation of light-weight image representations, frequently used for scalable image content. Over the past decades, several conceptual approaches on the representation of images with smooth gradients have emerged that each led to separate research threads, including the popular gradient meshes and diffusion curves. As the computational models matured, the mathematical descriptions diverged and papers started to focus more narrowly on subproblems, such as on the representation and creation of vector graphics, or the automatic vectorization from raster images. Most of the work concentrated on a specific mathematical model only. With this survey, we describe the established computational models in a consistent notation to spur further knowledge transfer, leveraging the recent advances in each field. We therefore categorize vector graphics papers from the last decades based on their underlying mathematical representations as well as on their contribution to the vector graphics content creation pipeline, comprising representation, creation, rasterization, and automatic image vectorization. This survey is meant as an entry point for both artists and researchers. We conclude this survey with an outlook on promising research directions and challenges to overcome in the future.",
"title": "A Survey of Smooth Vector Graphics: Recent Advances in Representation, Creation, Rasterization and Image Vectorization",
"normalizedTitle": "A Survey of Smooth Vector Graphics: Recent Advances in Representation, Creation, Rasterization and Image Vectorization",
"fno": "09942350",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Graphics",
"Image Color Analysis",
"Shape",
"Mathematical Models",
"Rendering Computer Graphics",
"Splines Mathematics",
"Solids",
"Diffusion Curves",
"Gradient Meshes",
"Survey",
"Vector Graphics"
],
"authors": [
{
"givenName": "Xingze",
"surname": "Tian",
"fullName": "Xingze Tian",
"affiliation": "Department of Computer Science, Friedrich-Alexander-Universität Erlangen, Nürnberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Günther",
"fullName": "Tobias Günther",
"affiliation": "Department of Computer Science, Friedrich-Alexander-Universität Erlangen, Nürnberg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-20",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isads/2005/8963/0/01452121",
"title": "Fast algorithm for line rasterization by using slope 1",
"doi": null,
"abstractUrl": "/proceedings-article/isads/2005/01452121/12OmNBCHMKs",
"parentPublication": {
"id": "proceedings/isads/2005/8963/0",
"title": "Proceedings. ISADS 2005. 2005 International Symposium on Autonomous Decentralized Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmc/2009/3501/3/3501c008",
"title": "Vector Graphics Rendering on Mobile Device",
"doi": null,
"abstractUrl": "/proceedings-article/cmc/2009/3501c008/12OmNBhZ4i8",
"parentPublication": {
"id": "proceedings/cmc/2009/3501/3",
"title": "2009 WRI International Conference on Communications and Mobile Computing. CMC 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2013/4999/0/06628586",
"title": "Vectorization of 3D-Characters by Integral Invariant Filtering of High-Resolution Triangular Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2013/06628586/12OmNwoxSfW",
"parentPublication": {
"id": "proceedings/icdar/2013/4999/0",
"title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmpcon/1992/2655/0/00186697",
"title": "Scalable graphics enhancements for PA-RISC workstations",
"doi": null,
"abstractUrl": "/proceedings-article/cmpcon/1992/00186697/12OmNxGSm2u",
"parentPublication": {
"id": "proceedings/cmpcon/1992/2655/0",
"title": "COMPCON Spring 1992",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcs/2002/1626/0/16260281",
"title": "Distributed Rasterization using OpenGL",
"doi": null,
"abstractUrl": "/proceedings-article/hpcs/2002/16260281/12OmNyqRnpd",
"parentPublication": {
"id": "proceedings/hpcs/2002/1626/0",
"title": "High Performance Computing Systems and Applications, Annual International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/02/07399427",
"title": "Manga Vectorization and Manipulation with Procedural Simple Screentone",
"doi": null,
"abstractUrl": "/journal/tg/2017/02/07399427/13rRUwIF69n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2013/07/06264117",
"title": "Efficient Vector Graphics Rasterization Accelerator Using Optimized Scan-Line Buffer",
"doi": null,
"abstractUrl": "/journal/si/2013/07/06264117/13rRUwInvcO",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020159",
"title": "Efficient Rasterization for Outdoor Radio Wave Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020159/13rRUwcAqqb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f668",
"title": "Vectorization and Rasterization: Self-Supervised Learning for Sketch and Handwriting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f668/1yeJK0IAFvW",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09940545",
"articleId": "1I6O5QqMxQ4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09944192",
"articleId": "1Ia7fREeHHW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
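Of the two representations this survey's abstract names, diffusion curves are the easier to sketch: colors fixed along curves diffuse into the image as the solution of a Laplace equation. The toy below is my own illustration, with two straight "curves" and plain Jacobi iteration chosen for clarity, not efficiency; it fills a grid between two color constraints:

```python
import numpy as np

h, w = 32, 32
img = np.zeros((h, w))
fixed = np.zeros((h, w), dtype=bool)
img[0, :], fixed[0, :] = 1.0, True    # "curve" carrying color 1 along the top
img[-1, :], fixed[-1, :] = 0.0, True  # "curve" carrying color 0 along the bottom

for _ in range(2000):  # relax the free pixels toward the harmonic solution
    avg = 0.25 * (np.roll(img, 1, 0) + np.roll(img, -1, 0) +
                  np.roll(img, 1, 1) + np.roll(img, -1, 1))
    img = np.where(fixed, img, avg)

print(round(img[h // 2, 0], 2))  # roughly midway between the two constraints
```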
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Ia7fREeHHW",
"doi": "10.1109/TVCG.2022.3220773",
"abstract": "Light fields are 4D scene representations that are typically structured as arrays of views or several directional samples per pixel in a single view. However, this highly correlated structure is not very efficient to transmit and manipulate, especially for editing. To tackle this issue, we propose a novel representation learning framework that can encode the light field into a single meta-view that is both compact and editable. Specifically, the meta-view composes of three visual channels and a complementary meta channel that is embedded with geometric and residual appearance information. The visual channels can be edited using existing 2D image editing tools, before reconstructing the whole edited light field. To facilitate edit propagation against occlusion, we design a special editing-aware decoding network that consistently propagates the visual edits to the whole light field upon reconstruction. Extensive experiments show that our proposed method achieves competitive representation accuracy and meanwhile enables consistent edit propagation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Light fields are 4D scene representations that are typically structured as arrays of views or several directional samples per pixel in a single view. However, this highly correlated structure is not very efficient to transmit and manipulate, especially for editing. To tackle this issue, we propose a novel representation learning framework that can encode the light field into a single meta-view that is both compact and editable. Specifically, the meta-view composes of three visual channels and a complementary meta channel that is embedded with geometric and residual appearance information. The visual channels can be edited using existing 2D image editing tools, before reconstructing the whole edited light field. To facilitate edit propagation against occlusion, we design a special editing-aware decoding network that consistently propagates the visual edits to the whole light field upon reconstruction. Extensive experiments show that our proposed method achieves competitive representation accuracy and meanwhile enables consistent edit propagation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Light fields are 4D scene representations that are typically structured as arrays of views or several directional samples per pixel in a single view. However, this highly correlated structure is not very efficient to transmit and manipulate, especially for editing. To tackle this issue, we propose a novel representation learning framework that can encode the light field into a single meta-view that is both compact and editable. Specifically, the meta-view composes of three visual channels and a complementary meta channel that is embedded with geometric and residual appearance information. The visual channels can be edited using existing 2D image editing tools, before reconstructing the whole edited light field. To facilitate edit propagation against occlusion, we design a special editing-aware decoding network that consistently propagates the visual edits to the whole light field upon reconstruction. Extensive experiments show that our proposed method achieves competitive representation accuracy and meanwhile enables consistent edit propagation.",
"title": "LF2MV: Learning An Editable Meta-View Towards Light Field Representation",
"normalizedTitle": "LF2MV: Learning An Editable Meta-View Towards Light Field Representation",
"fno": "09944192",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Image Reconstruction",
"Decoding",
"Representation Learning",
"Standards",
"Image Coding",
"Geometry",
"Light Field",
"Compact Representation",
"Editing Propagation",
"Representation Learning"
],
"authors": [
{
"givenName": "Menghan",
"surname": "Xia",
"fullName": "Menghan Xia",
"affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, HK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jose",
"surname": "Echevarria",
"fullName": "Jose Echevarria",
"affiliation": "Adobe System Inc, US",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Minshan",
"surname": "Xie",
"fullName": "Minshan Xie",
"affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, HK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tien-Tsin",
"surname": "Wong",
"fullName": "Tien-Tsin Wong",
"affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, HK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2012/1611/0/06239346",
"title": "Light field denoising, light field superresolution and stereo camera based refocussing using a GMM light field patch prior",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239346/12OmNqHqSqk",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g873",
"title": "Snapshot Hyperspectral Light Field Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g873/12OmNqNG3iJ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2018/1857/0/185701a321",
"title": "Robust Surface Light Field Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2018/185701a321/12OmNrIJqDf",
"parentPublication": {
"id": "proceedings/mipr/2018/1857/0",
"title": "2018 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177447",
"title": "Light field image editing by 4D patch synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177447/12OmNrMZpla",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a029",
"title": "4D Temporally Coherent Light-Field Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a029/12OmNwErpCH",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/03/ttp2014030606",
"title": "Variational Light Field Analysis for Disparity Estimation and Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tp/2014/03/ttp2014030606/13rRUxC0SPN",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a012",
"title": "Surface Light Field Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a012/17D45WODasr",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b853",
"title": "An Epipolar Volume Autoencoder With Adversarial Loss for Deep Light Field Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b853/1iTvlh1qLGU",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090524",
"title": "Light Field Editing Propagation using 4D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090524/1jIxjGuj4zK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2021/04/09392312",
"title": "LFI-Augmenter: Intelligent Light Field Image Editing With Interleaved Spatial-Angular Convolution",
"doi": null,
"abstractUrl": "/magazine/mu/2021/04/09392312/1sq7wcFIASI",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09942350",
"articleId": "1I8NUS7Puc8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09944974",
"articleId": "1IbMbZhmPE4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IbMcu77eGA",
"name": "ttg555501-09944192s1-supp1-3220773.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09944192s1-supp1-3220773.pdf",
"extension": "pdf",
"size": "5.19 MB",
"__typename": "WebExtraType"
},
{
"id": "1IbMcltcz6M",
"name": "ttg555501-09944192s1-supp2-3220773.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09944192s1-supp2-3220773.mp4",
"extension": "mp4",
"size": "40.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
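The "arrays of views" structure named in this abstract is easy to picture as a 4D (plus color) tensor. A small sketch with hypothetical sizes, showing how a sub-aperture view and a view mosaic fall out of the same array:

```python
import numpy as np

U, V, H, W, C = 5, 5, 64, 64, 3         # hypothetical 5x5 grid of 64x64 RGB views
lf = np.random.rand(U, V, H, W, C)      # stand-in light field L[u, v, y, x, c]

center_view = lf[U // 2, V // 2]        # one sub-aperture view, shape (64, 64, 3)
mosaic = lf.transpose(0, 2, 1, 3, 4).reshape(U * H, V * W, C)
print(center_view.shape, mosaic.shape)  # (64, 64, 3) (320, 320, 3)
```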
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I6O5QqMxQ4",
"doi": "10.1109/TVCG.2022.3219982",
"abstract": "Distributed ray tracing algorithms are widely used when rendering massive scenes, where data utilization and load balancing are the keys to improving performance. One essential observation is that rays are temporally coherent, which indicates that temporal information can be used to improve computational efficiency. In this paper, we use temporal coherence to optimize the performance of distributed ray tracing. First, we propose a temporal coherence-based scheduling algorithm to guide the task/data assignment and scheduling. Then, we propose a virtual portal structure to predict the radiance of rays based on the previous frame, and send the rays with low radiance to a precomputed simplified model for further tracing, which can dramatically reduce the traversal complexity and the overhead of network data transmission. The approach was validated on scenes of sizes up to 355 GB. Our algorithm can achieve a speedup of up to 81% compared to previous algorithms, with a very small mean squared error.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distributed ray tracing algorithms are widely used when rendering massive scenes, where data utilization and load balancing are the keys to improving performance. One essential observation is that rays are temporally coherent, which indicates that temporal information can be used to improve computational efficiency. In this paper, we use temporal coherence to optimize the performance of distributed ray tracing. First, we propose a temporal coherence-based scheduling algorithm to guide the task/data assignment and scheduling. Then, we propose a virtual portal structure to predict the radiance of rays based on the previous frame, and send the rays with low radiance to a precomputed simplified model for further tracing, which can dramatically reduce the traversal complexity and the overhead of network data transmission. The approach was validated on scenes of sizes up to 355 GB. Our algorithm can achieve a speedup of up to 81% compared to previous algorithms, with a very small mean squared error.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distributed ray tracing algorithms are widely used when rendering massive scenes, where data utilization and load balancing are the keys to improving performance. One essential observation is that rays are temporally coherent, which indicates that temporal information can be used to improve computational efficiency. In this paper, we use temporal coherence to optimize the performance of distributed ray tracing. First, we propose a temporal coherence-based scheduling algorithm to guide the task/data assignment and scheduling. Then, we propose a virtual portal structure to predict the radiance of rays based on the previous frame, and send the rays with low radiance to a precomputed simplified model for further tracing, which can dramatically reduce the traversal complexity and the overhead of network data transmission. The approach was validated on scenes of sizes up to 355 GB. Our algorithm can achieve a speedup of up to 81% compared to previous algorithms, with a very small mean squared error.",
"title": "Temporal Coherence-Based Distributed Ray Tracing of Massive Scenes",
"normalizedTitle": "Temporal Coherence-Based Distributed Ray Tracing of Massive Scenes",
"fno": "09940545",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Ray Tracing",
"Portals",
"Heuristic Algorithms",
"Dynamic Scheduling",
"Task Analysis",
"Distributed Databases",
"Computer Graphics",
"Distributed Graphics",
"Ray Tracing"
],
"authors": [
{
"givenName": "Xiang",
"surname": "Xu",
"fullName": "Xiang Xu",
"affiliation": "Shandong Key Laboratory of Blockchain Finance, Shandong University of Finance and Economics, Jinan, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lu",
"surname": "Wang",
"fullName": "Lu Wang",
"affiliation": "School of Software, Shandong University, Jinan, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arsène",
"surname": "Pérard-Gayot",
"fullName": "Arsène Pérard-Gayot",
"affiliation": "Wētā Digital, PO Box 15208, Miramar, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Richard",
"surname": "Membarth",
"fullName": "Richard Membarth",
"affiliation": "Technische Hochschule Ingolstadt (THI), Research Institute AImotion Bavaria, Ingolstadt, Bayern, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cuiyu",
"surname": "Li",
"fullName": "Cuiyu Li",
"affiliation": "Advanced Computing East China Sub-center, Suzhou, JiangSu, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chenglei",
"surname": "Yang",
"fullName": "Chenglei Yang",
"affiliation": "School of Software, Shandong University, Jinan, Shandong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Philipp",
"surname": "Slusallek",
"fullName": "Philipp Slusallek",
"affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarland University, Saarland Informatics Campus, Saarbrc̎cken, Saarland, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pvg/2003/2091/0/20910011",
"title": "Distributed Interactive Ray Tracing of Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2003/20910011/12OmNBO3KjK",
"parentPublication": {
"id": "proceedings/pvg/2003/2091/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a258",
"title": "Dynamic Per Object Ray Caching Textures for Real-Time Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a258/12OmNCfAPL8",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2006/0693/0/04061544",
"title": "RT-DEFORM: Interactive Ray Tracing of Dynamic Scenes using BVHs",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061544/12OmNwErpUk",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870027",
"title": "A Hardware Acceleration Method for Volumetric Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571280",
"title": "Real-Time Ray Tracing of Complex Molecular Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571280/12OmNy314eg",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/09/06081859",
"title": "Combining Single and Packet-Ray Tracing for Arbitrary Ray Distributions on the Intel MIC Architecture",
"doi": null,
"abstractUrl": "/journal/tg/2012/09/06081859/13rRUwInvJe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1997/04/v0316",
"title": "Breadth-First Ray Tracing Utilizing Uniform Spatial Subdivision",
"doi": null,
"abstractUrl": "/journal/tg/1997/04/v0316/13rRUxBa5ne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/06/mcg2007060036",
"title": "Exploring a Boeing 777: Ray Tracing Large-Scale CAD Data",
"doi": null,
"abstractUrl": "/magazine/cg/2007/06/mcg2007060036/13rRUxC0SGw",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08115176",
"title": "Time Interval Ray Tracing for Motion Blur",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08115176/14H4WMfTBId",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cecit/2021/3757/0/375700b168",
"title": "Stackless KD-Tree Traversal For Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/cecit/2021/375700b168/1CdEOBZgTVC",
"parentPublication": {
"id": "proceedings/cecit/2021/3757/0",
"title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09939115",
"articleId": "1I1KuH1xVF6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09942350",
"articleId": "1I8NUS7Puc8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1I8NUzcFlBu",
"name": "ttg555501-09940545s1-supp1-3219982.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09940545s1-supp1-3219982.mp4",
"extension": "mp4",
"size": "19.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
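The culling idea stated in this abstract, that rays predicted to carry little radiance can be diverted to a precomputed simplified model, can be caricatured in a few lines. This is only a toy; the trace functions and threshold are placeholders, not the paper's virtual-portal scheme:

```python
def shade_frame(rays, prev_radiance, trace_full, trace_proxy, threshold=0.05):
    """Route each ray by its previous-frame radiance estimate (toy version)."""
    out = {}
    for ray_id in rays:
        # Low predicted radiance -> cheap simplified model; otherwise full trace.
        if prev_radiance.get(ray_id, 1.0) < threshold:
            out[ray_id] = trace_proxy(ray_id)
        else:
            out[ray_id] = trace_full(ray_id)
    return out
```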
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I1KuH1xVF6",
"doi": "10.1109/TVCG.2022.3219762",
"abstract": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A systematic review (SR) is essential with up-to-date research evidence to support clinical decisions and practices. However, the growing literature volume makes it challenging for SR reviewers and clinicians to discover useful information efficiently. Many human-in-the-loop information retrieval approaches (HIR) have been proposed to rank documents semantically similar to users' queries and provide interactive visualizations to facilitate document retrieval. Given that the queries are mainly composed of keywords and keyphrases retrieving documents that are semantically similar to a query does not necessarily respond to the clinician's need. Clinicians still have to review many documents to find the solution. The problem motivates us to develop a visual analytics system, DocFlow, to facilitate information-seeking. One of the features of our DocFlow is accepting natural language questions. The detailed description enables retrieving documents that can answer users' questions. Additionally, clinicians often categorize documents based on their backgrounds and with different purposes (e.g., populations, treatments). Since the criteria are unknown and cannot be pre-defined in advance, existing methods can only achieve categorization by considering the entire information in documents. In contrast, by locating answers in each document, our DocFlow can intelligently categorize documents based on users' questions. The second feature of our DocFlow is a flexible interface where users can arrange a sequence of questions to customize their rules for document retrieval and categorization. The two features of this visual analytics system support a flexible information-seeking process. The case studies and the feedback from domain experts demonstrate the usefulness and effectiveness of our DocFlow.",
"title": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization",
"normalizedTitle": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization",
"fno": "09939115",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Systematics",
"Visual Analytics",
"Task Analysis",
"Semantics",
"Human In The Loop",
"Natural Languages",
"Bit Error Rate",
"Biomedical Systematic Review",
"Evidence Based Practice",
"Human In The Loop Information Retrieval",
"Question Based Document Retrieval",
"Question Based Document Categorization"
],
"authors": [
{
"givenName": "Rui",
"surname": "Qiu",
"fullName": "Rui Qiu",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yamei",
"surname": "Tu",
"fullName": "Yamei Tu",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Shuen",
"surname": "Wang",
"fullName": "Yu-Shuen Wang",
"affiliation": "Computer Science, National Yang Ming Chiao Tung University, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Po-Yin",
"surname": "Yen",
"fullName": "Po-Yin Yen",
"affiliation": "Institute for Informatics, Washington University School of Medicine, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Computer Science and Engineering, The Ohio State University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042511",
"title": "VisIRR: Visual analytics for information retrieval and recommendation with large-scale document data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042511/12OmNASraHn",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460915",
"title": "Logo spotting for document categorization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460915/12OmNBInLk9",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmwa/2009/3646/0/3646a197",
"title": "Web Document Categorization Algorithm Using LDE and MA",
"doi": null,
"abstractUrl": "/proceedings-article/wmwa/2009/3646a197/12OmNBmf3bO",
"parentPublication": {
"id": "proceedings/wmwa/2009/3646/0",
"title": "Web Mining and Web-based Application, Pacific-Asia Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a367",
"title": "A New Retrieval Ranking Method based on Document Retrieval Expected Value in Chinese Document",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a367/12OmNBqdr2Y",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fbie/2008/3561/0/3561a437",
"title": "Research on Medical Document Categorization",
"doi": null,
"abstractUrl": "/proceedings-article/fbie/2008/3561a437/12OmNx7G5Sj",
"parentPublication": {
"id": "proceedings/fbie/2008/3561/0",
"title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitaw/2008/3505/0/3505a601",
"title": "Kernel Discriminant Analysis Algorithm for Document Categorization",
"doi": null,
"abstractUrl": "/proceedings-article/iitaw/2008/3505a601/12OmNyiUBoZ",
"parentPublication": {
"id": "proceedings/iitaw/2008/3505/0",
"title": "2008 International Symposium on Intelligent Information Technology Application Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2001/1230/0/12300270",
"title": "Document Categorization and Retrieval Using Semantic Microfeatures and Growing Cell Structures",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2001/12300270/12OmNzaQoaP",
"parentPublication": {
"id": "proceedings/dexa/2001/1230/0",
"title": "12th International Workshop on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/06/i0838",
"title": "Imaged Document Text Retrieval Without OCR",
"doi": null,
"abstractUrl": "/journal/tp/2002/06/i0838/13rRUzphDyR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aike/2021/3736/0/373600a029",
"title": "Towards Intelligent Legal Advisors for Document Retrieval and Question-Answering in German Legal Documents",
"doi": null,
"abstractUrl": "/proceedings-article/aike/2021/373600a029/1BrADc8Hd5K",
"parentPublication": {
"id": "proceedings/aike/2021/3736/0",
"title": "2021 IEEE Fourth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/escience/2019/2451/0/245100a533",
"title": "Iterative Document Retrieval via Deep Learning Approaches for Biomedical Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/escience/2019/245100a533/1ike1bwk62I",
"parentPublication": {
"id": "proceedings/escience/2019/2451/0",
"title": "2019 15th International Conference on eScience (eScience)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09938388",
"articleId": "1I05BGZpHZ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09940545",
"articleId": "1I6O5QqMxQ4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1I6O5iENFi8",
"name": "ttg555501-09939115s1-supp1-3219762.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09939115s1-supp1-3219762.mov",
"extension": "mov",
"size": "140 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
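The question-flow idea in this abstract, an ordered sequence of user questions that both retrieves and categorizes documents, can be mimicked with a trivial stand-in for the answer-locating step. Everything below is hypothetical; the keyword predicate merely stands in for DocFlow's actual question-answering component:

```python
def answers_question(doc: str, keywords) -> bool:
    # Keyword stand-in for locating an answer to a question in a document.
    return any(k in doc.lower() for k in keywords)

def categorize(docs, question_flow):
    """Route each document to the first question it can answer (sketch)."""
    buckets = {label: [] for label, _ in question_flow}
    buckets["unmatched"] = []
    for doc in docs:
        for label, kws in question_flow:
            if answers_question(doc, kws):
                buckets[label].append(doc)
                break
        else:
            buckets["unmatched"].append(doc)
    return buckets

flow = [("pediatric", ["child", "pediatric"]), ("adult", ["adult"])]
print(categorize(["Trial in adult patients", "Pediatric cohort study"], flow))
```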
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I05Bh36uZy",
"doi": "10.1109/TVCG.2022.3219232",
"abstract": "One of the potential solutions for model interpretation is to train a surrogate model: a more transparent model that approximates the behavior of the model to be explained. Typically, <italic>classification rules</italic> or <italic>decision trees</italic> are used due to their logic-based expressions. However, decision trees can grow too deep, and rule sets can become too large to approximate a complex model. Unlike paths on a decision tree that must share ancestor nodes (conditions), rules are more flexible. However, the unstructured visual representation of rules makes it hard to make inferences across rules. In this paper, we focus on tabular data and present novel algorithmic and interactive solutions to address these issues. First, we present <italic>H</italic>ierarchical <italic>S</italic>urrogate <italic>R</italic>ules (HSR), an algorithm that generates hierarchical rules based on user-defined parameters. We also contribute SuRE, a visual analytics (VA) system that integrates HSR and an interactive surrogate rule visualization, the <italic>Feature-Aligned Tree</italic>, which depicts rules as trees while aligning features for easier comparison. We evaluate the algorithm in terms of parameter sensitivity, time performance, and comparison with surrogate decision trees and find that it scales reasonably well and overcomes the shortcomings of surrogate decision trees. We evaluate the visualization and the system through a usability study and an observational study with domain experts. Our investigation shows that the participants can use feature-aligned trees to perform non-trivial tasks with very high accuracy. We also discuss many interesting findings, including a rule analysis task characterization, that can be used for visualization design and future research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the potential solutions for model interpretation is to train a surrogate model: a more transparent model that approximates the behavior of the model to be explained. Typically, <italic>classification rules</italic> or <italic>decision trees</italic> are used due to their logic-based expressions. However, decision trees can grow too deep, and rule sets can become too large to approximate a complex model. Unlike paths on a decision tree that must share ancestor nodes (conditions), rules are more flexible. However, the unstructured visual representation of rules makes it hard to make inferences across rules. In this paper, we focus on tabular data and present novel algorithmic and interactive solutions to address these issues. First, we present <italic>H</italic>ierarchical <italic>S</italic>urrogate <italic>R</italic>ules (HSR), an algorithm that generates hierarchical rules based on user-defined parameters. We also contribute SuRE, a visual analytics (VA) system that integrates HSR and an interactive surrogate rule visualization, the <italic>Feature-Aligned Tree</italic>, which depicts rules as trees while aligning features for easier comparison. We evaluate the algorithm in terms of parameter sensitivity, time performance, and comparison with surrogate decision trees and find that it scales reasonably well and overcomes the shortcomings of surrogate decision trees. We evaluate the visualization and the system through a usability study and an observational study with domain experts. Our investigation shows that the participants can use feature-aligned trees to perform non-trivial tasks with very high accuracy. We also discuss many interesting findings, including a rule analysis task characterization, that can be used for visualization design and future research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the potential solutions for model interpretation is to train a surrogate model: a more transparent model that approximates the behavior of the model to be explained. Typically, classification rules or decision trees are used due to their logic-based expressions. However, decision trees can grow too deep, and rule sets can become too large to approximate a complex model. Unlike paths on a decision tree that must share ancestor nodes (conditions), rules are more flexible. However, the unstructured visual representation of rules makes it hard to make inferences across rules. In this paper, we focus on tabular data and present novel algorithmic and interactive solutions to address these issues. First, we present Hierarchical Surrogate Rules (HSR), an algorithm that generates hierarchical rules based on user-defined parameters. We also contribute SuRE, a visual analytics (VA) system that integrates HSR and an interactive surrogate rule visualization, the Feature-Aligned Tree, which depicts rules as trees while aligning features for easier comparison. We evaluate the algorithm in terms of parameter sensitivity, time performance, and comparison with surrogate decision trees and find that it scales reasonably well and overcomes the shortcomings of surrogate decision trees. We evaluate the visualization and the system through a usability study and an observational study with domain experts. Our investigation shows that the participants can use feature-aligned trees to perform non-trivial tasks with very high accuracy. We also discuss many interesting findings, including a rule analysis task characterization, that can be used for visualization design and future research.",
"title": "Visual Exploration of Machine Learning Model Behavior with Hierarchical Surrogate Rule Sets",
"normalizedTitle": "Visual Exploration of Machine Learning Model Behavior with Hierarchical Surrogate Rule Sets",
"fno": "09937064",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Decision Trees",
"Data Models",
"Behavioral Sciences",
"Analytical Models",
"Predictive Models",
"Feature Extraction",
"Data Visualization",
"Visualization",
"Rule Set",
"Surrogate Model",
"Model Understanding"
],
"authors": [
{
"givenName": "Jun",
"surname": "Yuan",
"fullName": "Jun Yuan",
"affiliation": "New York University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brian",
"surname": "Barr",
"fullName": "Brian Barr",
"affiliation": "Capital One, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kyle",
"surname": "Overton",
"fullName": "Kyle Overton",
"affiliation": "Capital One, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enrico",
"surname": "Bertini",
"fullName": "Enrico Bertini",
"affiliation": "Northeastern University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04761257",
"title": "Feature selection via decision tree surrogate splits",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761257/12OmNvk7K6y",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09751203",
"title": "GNN-Surrogate: A Hierarchical and Adaptive Graph Neural Network for Parameter Space Exploration of Unstructured-Mesh Ocean Simulations",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09751203/1CnxNEIPqE0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09878222",
"title": "Studying the Robustness of Anti-Adversarial Federated Learning Models Detecting Cyberattacks in IoT Spectrum Sensors",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09878222/1GrP91HemEo",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904429",
"title": "VDL-Surrogate: A View-Dependent Latent-based Model for Parameter Space Exploration of Ensemble Simulations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904429/1H1gjOQxk40",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09946438",
"title": "Unauthorized Microphone Access Restraint Based on User Behavior Perception in Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09946438/1Idr41RFHkk",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2022/6936/0/693600a006",
"title": "GORITE: A BDI Realisation of Behavior Trees",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2022/693600a006/1JvaJ44YwXm",
"parentPublication": {
"id": "proceedings/ica/2022/6936/0",
"title": "2022 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10021089",
"title": "Exploring the Target Distribution for Surrogate-Based Black-Box Attacks",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10021089/1KfRJiz7RvO",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10032660",
"title": "Characterizing Internet Card User Portraits for Efficient Churn Prediction Model Design",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10032660/1KnSnQeg0P6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/10080971",
"title": "Time to Think the Security of WiFi-Based Behavior Recognition Systems",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/10080971/1LM6Z41TsXK",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/5555/01/10106642",
"title": "Behavior Trees and State Machines in Robotics Applications",
"doi": null,
"abstractUrl": "/journal/ts/5555/01/10106642/1MwAu1wj4Oc",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09933028",
"articleId": "1HVsnduN8e4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09937145",
"articleId": "1I05Bw9xb6o",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1I1Kyb9X5gQ",
"name": "ttg555501-09937064s1-supp2-3219232.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09937064s1-supp2-3219232.pdf",
"extension": "pdf",
"size": "56.9 kB",
"__typename": "WebExtraType"
},
{
"id": "1I1KzPkH76E",
"name": "ttg555501-09937064s1-supp1-3219232.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09937064s1-supp1-3219232.mp4",
"extension": "mp4",
"size": "80.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1I1KygoXNcI",
"name": "ttg555501-09937064s1-supp3-3219232.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09937064s1-supp3-3219232.pdf",
"extension": "pdf",
"size": "1.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I05Bw9xb6o",
"doi": "10.1109/TVCG.2022.3219248",
"abstract": "In recent years, visual analytics (VA) has shown promise in alleviating the challenges of interpreting black-box deep learning (DL) models. While the focus of VA for explainable DL has been mainly on classification problems, DL is gaining popularity in high-dimensional-to-high-dimensional (<italic>H-H</italic>) problems such as image-to-image translation. In contrast to classification, <italic>H-H</italic> problems have no explicit instance groups or classes to study. Each output is continuous, high-dimensional, and changes in an unknown non-linear manner with changes in the input. These unknown relations between the input, model and output necessitate the user to analyze them in conjunction, leveraging symmetries between them. Since classification tasks do not exhibit some of these challenges, most existing VA systems and frameworks allow limited control of the components required to analyze models beyond classification. Hence, we identify the need for and present a unified conceptual framework, the <italic>Transform-and-Perform</italic> framework (<italic>T&P</italic>), to facilitate the design of VA systems for DL model analysis focusing on <italic>H-H</italic> problems. <italic>T&P</italic> provides a checklist to structure and identify workflows and analysis strategies to design new VA systems, and understand existing ones to uncover potential gaps for improvements. The goal is to aid the creation of effective VA systems that support the structuring of model understanding and identifying actionable insights for model improvements. We highlight the growing need for new frameworks like <italic>T&P</italic> with a real-world image-to-image translation application. We illustrate how <italic>T&P</italic> effectively supports the understanding and identification of potential gaps in existing VA systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years, visual analytics (VA) has shown promise in alleviating the challenges of interpreting black-box deep learning (DL) models. While the focus of VA for explainable DL has been mainly on classification problems, DL is gaining popularity in high-dimensional-to-high-dimensional (<italic>H-H</italic>) problems such as image-to-image translation. In contrast to classification, <italic>H-H</italic> problems have no explicit instance groups or classes to study. Each output is continuous, high-dimensional, and changes in an unknown non-linear manner with changes in the input. These unknown relations between the input, model and output necessitate the user to analyze them in conjunction, leveraging symmetries between them. Since classification tasks do not exhibit some of these challenges, most existing VA systems and frameworks allow limited control of the components required to analyze models beyond classification. Hence, we identify the need for and present a unified conceptual framework, the <italic>Transform-and-Perform</italic> framework (<italic>T&P</italic>), to facilitate the design of VA systems for DL model analysis focusing on <italic>H-H</italic> problems. <italic>T&P</italic> provides a checklist to structure and identify workflows and analysis strategies to design new VA systems, and understand existing ones to uncover potential gaps for improvements. The goal is to aid the creation of effective VA systems that support the structuring of model understanding and identifying actionable insights for model improvements. We highlight the growing need for new frameworks like <italic>T&P</italic> with a real-world image-to-image translation application. We illustrate how <italic>T&P</italic> effectively supports the understanding and identification of potential gaps in existing VA systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years, visual analytics (VA) has shown promise in alleviating the challenges of interpreting black-box deep learning (DL) models. While the focus of VA for explainable DL has been mainly on classification problems, DL is gaining popularity in high-dimensional-to-high-dimensional (H-H) problems such as image-to-image translation. In contrast to classification, H-H problems have no explicit instance groups or classes to study. Each output is continuous, high-dimensional, and changes in an unknown non-linear manner with changes in the input. These unknown relations between the input, model and output necessitate the user to analyze them in conjunction, leveraging symmetries between them. Since classification tasks do not exhibit some of these challenges, most existing VA systems and frameworks allow limited control of the components required to analyze models beyond classification. Hence, we identify the need for and present a unified conceptual framework, the Transform-and-Perform framework (T&P), to facilitate the design of VA systems for DL model analysis focusing on H-H problems. T&P provides a checklist to structure and identify workflows and analysis strategies to design new VA systems, and understand existing ones to uncover potential gaps for improvements. The goal is to aid the creation of effective VA systems that support the structuring of model understanding and identifying actionable insights for model improvements. We highlight the growing need for new frameworks like T&P with a real-world image-to-image translation application. We illustrate how T&P effectively supports the understanding and identification of potential gaps in existing VA systems.",
"title": "The <italic>Transform-and-Perform</italic> framework: Explainable deep learning beyond classification",
"normalizedTitle": "The Transform-and-Perform framework: Explainable deep learning beyond classification",
"fno": "09937145",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Analytical Models",
"Behavioral Sciences",
"Complexity Theory",
"Computational Modeling",
"Task Analysis",
"Context Modeling",
"Brain Modeling",
"Visual Analytics",
"Explainable AI",
"XAI",
"Framework",
"Deep Learning",
"High Dimensional To High Dimensional Translation"
],
"authors": [
{
"givenName": "Vidya",
"surname": "Prasad",
"fullName": "Vidya Prasad",
"affiliation": "Department of Mathematics and Computer Science, Eindhoven University of Technology, Eindhoven, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ruud J. G.",
"surname": "van Sloun",
"fullName": "Ruud J. G. van Sloun",
"affiliation": "Department of Electrical Engineering, Eindhoven University of Technology, Eindhoven, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stef van den",
"surname": "Elzen",
"fullName": "Stef van den Elzen",
"affiliation": "Department of Mathematics and Computer Science, Eindhoven University of Technology, Eindhoven, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anna",
"surname": "Vilanova",
"fullName": "Anna Vilanova",
"affiliation": "Department of Mathematics and Computer Science, Eindhoven University of Technology, Eindhoven, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nicola",
"surname": "Pezzotti",
"fullName": "Nicola Pezzotti",
"affiliation": "Department of Mathematics and Computer Science, Eindhoven University of Technology, Eindhoven, The Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tq/5555/01/09878222",
"title": "Studying the Robustness of Anti-Adversarial Federated Learning Models Detecting Cyberattacks in IoT Spectrum Sensors",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09878222/1GrP91HemEo",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09928211",
"title": "FewM-HGCL : Few-Shot Malware Variants Detection Via Heterogeneous Graph Contrastive Learning",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09928211/1HJuUzzFey4",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipccc/2022/8018/0/09894322",
"title": "Exploring Adversarial Attacks on Neural Networks: An Explainable Approach",
"doi": null,
"abstractUrl": "/proceedings-article/ipccc/2022/09894322/1HpCusekVfW",
"parentPublication": {
"id": "proceedings/ipccc/2022/8018/0",
"title": "2022 IEEE International Performance, Computing, and Communications Conference (IPCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09937064",
"title": "Visual Exploration of Machine Learning Model Behavior with Hierarchical Surrogate Rule Sets",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09937064/1I05Bh36uZy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2023/04/09961906",
"title": "To Follow or Not to Follow: Understanding <italic>Issue/Pull-Request Templates</italic> on GitHub",
"doi": null,
"abstractUrl": "/journal/ts/2023/04/09961906/1Ixw0ySXhTy",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10032660",
"title": "Characterizing Internet Card User Portraits for Efficient Churn Prediction Model Design",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10032660/1KnSnQeg0P6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/5555/01/10113794",
"title": "Efficient and Robust KPI Outlier Detection for Large-Scale Datacenters",
"doi": null,
"abstractUrl": "/journal/tc/5555/01/10113794/1MNbTs1XcIM",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2022/02/09207859",
"title": "Linear Time Reconciliation With Bounded Transfers of Genes",
"doi": null,
"abstractUrl": "/journal/tb/2022/02/09207859/1nuwhaK3I2c",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/12/09384328",
"title": "Learning Rates for Stochastic Gradient Descent With Nonconvex Objectives",
"doi": null,
"abstractUrl": "/journal/tp/2021/12/09384328/1scDnUQn6JW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552241",
"title": "<italic>COVID</italic>-view: Diagnosis of COVID-19 using Chest CT",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552241/1xic6RdmNC8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09937064",
"articleId": "1I05Bh36uZy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09938388",
"articleId": "1I05BGZpHZ6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1I05BGZpHZ6",
"doi": "10.1109/TVCG.2022.3217305",
"abstract": "Various temporal denoising methods have been proposed to clean up the noise for real-time ray tracing (RTRT). These methods rely on the temporal correspondences of pixels between the current and previous frames, i.e., per-pixel screen-space motion vectors. However, the state-of-the-art temporal reuse methods with traditional motion vectors cause artifacts in motion occlusions. We accordingly propose a novel neural temporal denoising method for indirect illumination of Monte Carlo (MC) ray tracing at 1 sample per pixel. Based on end-to-end multi-scale kernel-based reconstruction, we apply temporally reliable dual motion vectors to facilitate better reconstruction of the occlusions, and also introduce additional motion occlusion loss to reduce ghosting artifacts. Experiments show that our method significantly reduces the over-blurring and ghosting artifacts while generating high-quality images at real-time rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Various temporal denoising methods have been proposed to clean up the noise for real-time ray tracing (RTRT). These methods rely on the temporal correspondences of pixels between the current and previous frames, i.e., per-pixel screen-space motion vectors. However, the state-of-the-art temporal reuse methods with traditional motion vectors cause artifacts in motion occlusions. We accordingly propose a novel neural temporal denoising method for indirect illumination of Monte Carlo (MC) ray tracing at 1 sample per pixel. Based on end-to-end multi-scale kernel-based reconstruction, we apply temporally reliable dual motion vectors to facilitate better reconstruction of the occlusions, and also introduce additional motion occlusion loss to reduce ghosting artifacts. Experiments show that our method significantly reduces the over-blurring and ghosting artifacts while generating high-quality images at real-time rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Various temporal denoising methods have been proposed to clean up the noise for real-time ray tracing (RTRT). These methods rely on the temporal correspondences of pixels between the current and previous frames, i.e., per-pixel screen-space motion vectors. However, the state-of-the-art temporal reuse methods with traditional motion vectors cause artifacts in motion occlusions. We accordingly propose a novel neural temporal denoising method for indirect illumination of Monte Carlo (MC) ray tracing at 1 sample per pixel. Based on end-to-end multi-scale kernel-based reconstruction, we apply temporally reliable dual motion vectors to facilitate better reconstruction of the occlusions, and also introduce additional motion occlusion loss to reduce ghosting artifacts. Experiments show that our method significantly reduces the over-blurring and ghosting artifacts while generating high-quality images at real-time rates.",
"title": "Neural Temporal Denoising for Indirect Illumination",
"normalizedTitle": "Neural Temporal Denoising for Indirect Illumination",
"fno": "09938388",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Noise Reduction",
"Real Time Systems",
"Lighting",
"Kernel",
"Image Reconstruction",
"Image Color Analysis",
"Rendering Computer Graphics",
"Real Time Ray Tracing",
"Dual Motion Vector",
"Neural Temporal Denoising"
],
"authors": [
{
"givenName": "Yan",
"surname": "Zeng",
"fullName": "Yan Zeng",
"affiliation": "Department of the School of Software, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lu",
"surname": "Wang",
"fullName": "Lu Wang",
"affiliation": "Department of the School of Software, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanning",
"surname": "Xu",
"fullName": "Yanning Xu",
"affiliation": "Department of the School of Software, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiangxu",
"surname": "Meng",
"fullName": "Xiangxu Meng",
"affiliation": "Department of the School of Software, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1994/6952/1/00413338",
"title": "Motion estimation and compensation under varying illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413338/12OmNA14A7M",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2016/4400/0/4400a034",
"title": "Video Denoising Based on Spatial-Temporal Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2016/4400a034/12OmNAYoKx1",
"parentPublication": {
"id": "proceedings/icdh/2016/4400/0",
"title": "2016 6th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a064",
"title": "Improved Video Denoising Algorithm Based on Spatial-Temporal Combination",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a064/12OmNApLGKy",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745375",
"title": "Image sequence restoration in the presence of pathological motion and severe artifacts",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745375/12OmNrkBwnC",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118d222",
"title": "Diffuse Mirrors: 3D Reconstruction from Diffuse Indirect Illumination Using Inexpensive Time-of-Flight Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118d222/12OmNzuIjov",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260916",
"title": "Parallax360: Stereoscopic 360° Scene Representation for Head-Motion Parallax",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260916/13rRUyp7tX1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09904431",
"title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8622",
"title": "Modeling Indirect Illumination for Inverse Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8622/1H1jdnZPS0g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09194085",
"title": "Lightweight Bilateral Convolutional Neural Networks for Interactive Single-Bounce Diffuse Indirect Illumination",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09194085/1n0Ehetbdo4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/05/09279273",
"title": "A Progressive Fusion Generative Adversarial Network for Realistic and Consistent Video Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tp/2022/05/09279273/1pg8t3V4Ico",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09937145",
"articleId": "1I05Bw9xb6o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09939115",
"articleId": "1I1KuH1xVF6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HVsnduN8e4",
"doi": "10.1109/TVCG.2022.3218028",
"abstract": "Automatic tooth alignment target prediction is vital in shortening the planning time of orthodontic treatments and aligner designs. Generally, the quality of alignment targets greatly depends on the experience and ability of dentists and has enormous subjective factors. Therefore, many knowledge-driven alignment prediction methods have been proposed to help inexperienced dentists. Unfortunately, existing methods tend to directly regress tooth motion, which lacks clinical interpretability. Tooth anatomical landmarks play a critical role in orthodontics because they are effective in aiding the assessment of whether teeth are in close arrangement and normal occlusion. Thus, we consider anatomical landmark constraints to improve tooth alignment results. In this paper, we present a novel tooth alignment neural network for alignment target predictions based on tooth landmark constraints and a hierarchical graph structure. We detect the landmarks of each tooth first and then construct a hierarchical graph of jaw-tooth-landmark to characterize the relationship between teeth and landmarks. Then, we define the landmark constraints to guide the network to learn the normal occlusion and predict the rigid transformation of each tooth during alignment. Our method achieves better results with the architecture built for tooth data and landmark constraints and has better explainability than previous methods with regard to clinical tooth alignments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Automatic tooth alignment target prediction is vital in shortening the planning time of orthodontic treatments and aligner designs. Generally, the quality of alignment targets greatly depends on the experience and ability of dentists and has enormous subjective factors. Therefore, many knowledge-driven alignment prediction methods have been proposed to help inexperienced dentists. Unfortunately, existing methods tend to directly regress tooth motion, which lacks clinical interpretability. Tooth anatomical landmarks play a critical role in orthodontics because they are effective in aiding the assessment of whether teeth are in close arrangement and normal occlusion. Thus, we consider anatomical landmark constraints to improve tooth alignment results. In this paper, we present a novel tooth alignment neural network for alignment target predictions based on tooth landmark constraints and a hierarchical graph structure. We detect the landmarks of each tooth first and then construct a hierarchical graph of jaw-tooth-landmark to characterize the relationship between teeth and landmarks. Then, we define the landmark constraints to guide the network to learn the normal occlusion and predict the rigid transformation of each tooth during alignment. Our method achieves better results with the architecture built for tooth data and landmark constraints and has better explainability than previous methods with regard to clinical tooth alignments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Automatic tooth alignment target prediction is vital in shortening the planning time of orthodontic treatments and aligner designs. Generally, the quality of alignment targets greatly depends on the experience and ability of dentists and has enormous subjective factors. Therefore, many knowledge-driven alignment prediction methods have been proposed to help inexperienced dentists. Unfortunately, existing methods tend to directly regress tooth motion, which lacks clinical interpretability. Tooth anatomical landmarks play a critical role in orthodontics because they are effective in aiding the assessment of whether teeth are in close arrangement and normal occlusion. Thus, we consider anatomical landmark constraints to improve tooth alignment results. In this paper, we present a novel tooth alignment neural network for alignment target predictions based on tooth landmark constraints and a hierarchical graph structure. We detect the landmarks of each tooth first and then construct a hierarchical graph of jaw-tooth-landmark to characterize the relationship between teeth and landmarks. Then, we define the landmark constraints to guide the network to learn the normal occlusion and predict the rigid transformation of each tooth during alignment. Our method achieves better results with the architecture built for tooth data and landmark constraints and has better explainability than previous methods with regard to clinical tooth alignments.",
"title": "Tooth Alignment Network Based on Landmark Constraints and Hierarchical Graph Structure",
"normalizedTitle": "Tooth Alignment Network Based on Landmark Constraints and Hierarchical Graph Structure",
"fno": "09933028",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Teeth",
"Point Cloud Compression",
"Feature Extraction",
"Dentistry",
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Orthodontics",
"Tooth Landmark",
"Tooth Alignment",
"Hierarchical Graph Neural Network"
],
"authors": [
{
"givenName": "Chen",
"surname": "Wang",
"fullName": "Chen Wang",
"affiliation": "School of Software, Shandong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guangshun",
"surname": "Wei",
"fullName": "Guangshun Wei",
"affiliation": "School of Software, Shandong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guodong",
"surname": "Wei",
"fullName": "Guodong Wei",
"affiliation": "University of HongKong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenping",
"surname": "Wang",
"fullName": "Wenping Wang",
"affiliation": "Texas A&M University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuanfeng",
"surname": "Zhou",
"fullName": "Yuanfeng Zhou",
"affiliation": "School of Software, Shandong University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034b619",
"title": "Dense Face Alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034b619/12OmNwF0C4S",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252110959",
"title": "Shape Alignment by Learning a Landmark-PDM Coupled Model",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252110959/12OmNy7h3cU",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2015/8302/0/8302a718",
"title": "Use of Tooth Guide Trainer on Dental Students' Training of Shade Matching",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2015/8302a718/12OmNywxlO7",
"parentPublication": {
"id": "proceedings/itme/2015/8302/0",
"title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmete/2016/3411/0/07938944",
"title": "Detection and Grading Severity of Caries in Dental X-ray Images",
"doi": null,
"abstractUrl": "/proceedings-article/icmete/2016/07938944/12OmNzahc5j",
"parentPublication": {
"id": "proceedings/icmete/2016/3411/0",
"title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmete/2016/3411/0/07938993",
"title": "Feature Line Profile Based Automatic Detection of Dental Caries in Bitewing Radiography",
"doi": null,
"abstractUrl": "/proceedings-article/icmete/2016/07938993/12OmNzuZUBc",
"parentPublication": {
"id": "proceedings/icmete/2016/3411/0",
"title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08362667",
"title": "3D Tooth Segmentation and Labeling Using Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08362667/13rRUEgs2C4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0720",
"title": "DArch: Dental Arch Prior-assisted 3D Tooth Instance Segmentation with Weak Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0720/1H1kFKjFl16",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300a749",
"title": "Automatic Individual Tooth Segmentation in Cone-Beam Computed Tomography Based on Multi-Task CNN and Watershed Transform",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300a749/1LSPkbunsdy",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0",
"title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2020/9274/0/927400a164",
"title": "A study on tooth segmentation and numbering using end-to-end deep neural networks",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2020/927400a164/1p2VzkB4pji",
"parentPublication": {
"id": "proceedings/sibgrapi/2020/9274/0",
"title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09445658",
"title": "A Fully Automated Method for 3D Individual Tooth Identification and Segmentation in Dental CBCT",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09445658/1uaajNYaeQw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09930626",
"articleId": "1HMOYkaK9Ww",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09937064",
"articleId": "1I05Bh36uZy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HMOX2J2VMY",
"doi": "10.1109/TVCG.2022.3216919",
"abstract": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"normalizedTitle": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"fno": "09930144",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Stars",
"Visualization",
"Encoding",
"Data Visualization",
"Dimensionality Reduction",
"Image Color Analysis",
"Glyph Visualization",
"High Dimensional Data Visualization",
"Two Dimensional Embeddings"
],
"authors": [
{
"givenName": "Christian",
"surname": "van Onzenoodt",
"fullName": "Christian van Onzenoodt",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pere-Pau",
"surname": "Vázquez",
"fullName": "Pere-Pau Vázquez",
"affiliation": "ViRVIG Group, UPC Barcelona, Barcelona, Barcelona",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2005/2790/0/27900019",
"title": "Multivariate Glyphs for Multi-Object Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2005/27900019/12OmNxE2n28",
"parentPublication": {
"id": "proceedings/ieee-infovis/2005/2790/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07445239",
"title": "A Systematic Review of Experimental Studies on Data Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07445239/13rRUNvgz4m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875973",
"title": "The Influence of Contour on Similarity Perception of Star Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875973/13rRUwhHcQV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a058",
"title": "Visualizing Multidimensional Data in Treemaps with Adaptive Glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a058/17D45XeKgvR",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a157",
"title": "Evaluation of Effectiveness of Glyphs to Enhance ChronoView",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a157/1cMF9mvWMFO",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08967136",
"title": "Glyphboard: Visual Exploration of High-Dimensional Data Combining Glyphs with Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08967136/1gPjxXgWQM0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09067088",
"title": "AgentVis: Visual Analysis of Agent Behavior With Hierarchical Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09067088/1j1lyTz50k0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/05/09128033",
"title": "Interpretation of Structural Preservation in Low-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tk/2022/05/09128033/1l3u8JV5SP6",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552929",
"title": "Attribute-based Explanation of Non-Linear Embeddings of High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552929/1xic3zJwVwI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557223",
"title": "GlyphCreator: Towards Example-based Automatic Generation of Circular Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557223/1xlvZajdjmo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09928218",
"articleId": "1HJuJYF342Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09930626",
"articleId": "1HMOYkaK9Ww",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HOtlkinUTC",
"name": "ttg555501-09930144s1-access-3216919-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09930144s1-access-3216919-mm.zip",
"extension": "zip",
"size": "27.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HMOYkaK9Ww",
"doi": "10.1109/TVCG.2022.3217266",
"abstract": "This paper presents a monocular projector-camera (procam) system using modular architecture based on relay optics. Conventional coaxial procam systems cannot support (1) online changes to lens settings (zoom and focus) and (2) wide-angle projection mapping. We develop design guidelines for a proposed procam system that would solve these restrictions and address the proposed system's unique technical issue of crosstalk between the camera and projector pixels. We conducted experiments using prototypes to validate the feasibility of the proposed framework. First, we confirmed that the proposed crosstalk reduction technique worked well. Second, we found our technique could achieve correct alignment of a projected image onto a moving surface while changing the zoom and focus of the objective lens. The monocular procam system also achieved radiometric compensation where a surface texture was visually concealed by pixel-wise control of a projection color based on the captured results of offline color pattern projections. Finally, we demonstrated the high expandability of our modular architecture, through the creation of a high dynamic range projection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a monocular projector-camera (procam) system using modular architecture based on relay optics. Conventional coaxial procam systems cannot support (1) online changes to lens settings (zoom and focus) and (2) wide-angle projection mapping. We develop design guidelines for a proposed procam system that would solve these restrictions and address the proposed system's unique technical issue of crosstalk between the camera and projector pixels. We conducted experiments using prototypes to validate the feasibility of the proposed framework. First, we confirmed that the proposed crosstalk reduction technique worked well. Second, we found our technique could achieve correct alignment of a projected image onto a moving surface while changing the zoom and focus of the objective lens. The monocular procam system also achieved radiometric compensation where a surface texture was visually concealed by pixel-wise control of a projection color based on the captured results of offline color pattern projections. Finally, we demonstrated the high expandability of our modular architecture, through the creation of a high dynamic range projection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a monocular projector-camera (procam) system using modular architecture based on relay optics. Conventional coaxial procam systems cannot support (1) online changes to lens settings (zoom and focus) and (2) wide-angle projection mapping. We develop design guidelines for a proposed procam system that would solve these restrictions and address the proposed system's unique technical issue of crosstalk between the camera and projector pixels. We conducted experiments using prototypes to validate the feasibility of the proposed framework. First, we confirmed that the proposed crosstalk reduction technique worked well. Second, we found our technique could achieve correct alignment of a projected image onto a moving surface while changing the zoom and focus of the objective lens. The monocular procam system also achieved radiometric compensation where a surface texture was visually concealed by pixel-wise control of a projection color based on the captured results of offline color pattern projections. Finally, we demonstrated the high expandability of our modular architecture, through the creation of a high dynamic range projection.",
"title": "A Monocular Projector-Camera System using Modular Architecture",
"normalizedTitle": "A Monocular Projector-Camera System using Modular Architecture",
"fno": "09930626",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lenses",
"Cameras",
"Relays",
"Optics",
"Mirrors",
"Optical Imaging",
"Optical Sensors",
"Projector Camera System",
"Augmented Reality",
"Projection Mapping"
],
"authors": [
{
"givenName": "Kenta",
"surname": "Yamamoto",
"fullName": "Kenta Yamamoto",
"affiliation": "Graduate School of Engineering Science, Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Graduate School of Engineering Science, Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ikuho",
"surname": "Tani",
"fullName": "Ikuho Tani",
"affiliation": "Graduate School of Engineering Science, Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kosuke",
"surname": "Sato",
"fullName": "Kosuke Sato",
"affiliation": "Graduate School of Engineering Science, Osaka University, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-9",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2007/1179/0/04270475",
"title": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d568",
"title": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d568/12OmNxdm4Cp",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937525",
"title": "Smarter presentations: exploiting homography in camera-projector systems",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937525/12OmNxwncaw",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2012/2049/0/06266298",
"title": "Inexpensive monocular pico-projector-based augmented reality display for surgical microscope",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2012/06266298/12OmNzvhvy0",
"parentPublication": {
"id": "proceedings/cbms/2012/2049/0",
"title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1368",
"title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07014259",
"title": "Extended Depth-of-Field Projector by Fast Focal Sweep Projection",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07014259/13rRUxAASVV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/03/08868217",
"title": "Computational Phase-Modulated Eyeglasses",
"doi": null,
"abstractUrl": "/journal/tg/2021/03/08868217/1e7BZyDZnvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09930144",
"articleId": "1HMOX2J2VMY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09933028",
"articleId": "1HVsnduN8e4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HJuJYF342Y",
"doi": "10.1109/TVCG.2022.3216712",
"abstract": "<italic>Image-warping</italic>, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, <italic>Metameric image inpainting</italic> - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"abstracts": [
{
"abstractType": "Regular",
"content": "<italic>Image-warping</italic>, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, <italic>Metameric image inpainting</italic> - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image-warping, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, Metameric image inpainting - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"title": "Metameric Inpainting for Image Warping",
"normalizedTitle": "Metameric Inpainting for Image Warping",
"fno": "09928218",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Real Time Systems",
"Visualization",
"Rendering Computer Graphics",
"Neural Networks",
"Image Color Analysis",
"Task Analysis",
"Visual Perception",
"Inpainting",
"Warping",
"Perception",
"Real Time Rendering"
],
"authors": [
{
"givenName": "Rafael",
"surname": "Kuffner dos Anjos",
"fullName": "Rafael Kuffner dos Anjos",
"affiliation": "University of Leeds, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David R.",
"surname": "Walton",
"fullName": "David R. Walton",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kaan",
"surname": "Aksit",
"fullName": "Kaan Aksit",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sebastian",
"surname": "Friston",
"fullName": "Sebastian Friston",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Swapp",
"fullName": "David Swapp",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anthony",
"surname": "Steed",
"fullName": "Anthony Steed",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Ritschel",
"fullName": "Tobias Ritschel",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06012182",
"title": "Automatic content creation for multiview autostereoscopic displays using image domain warping",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012182/12OmNrAdsH9",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a351",
"title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011901",
"title": "Stereoscopic image inpainting using scene geometry",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011901/12OmNyr8YlH",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a221",
"title": "A Survey on Tangka Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a221/12OmNzcxZv1",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a464",
"title": "Multi-view Inpainting for RGB-D Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a464/17D45WgziNa",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699212",
"title": "3D PixMix: Image Inpainting in 3D Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699212/19F1PUM1Yk0",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecai/2022/7997/0/799700a145",
"title": "GAN Based Image Inpainting Methods: A Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/iwecai/2022/799700a145/1CugnMVrJbG",
"parentPublication": {
"id": "proceedings/iwecai/2022/7997/0",
"title": "2022 3rd International Conference on Electronic Communication and Artificial Intelligence (IWECAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956171",
"title": "Interactive Image Inpainting Using Semantic Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956171/1IHqiFLbCPm",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10038566",
"title": "Content-aware Warping for View Synthesis",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10038566/1KxPVE9pkxG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c266",
"title": "TransFill: Reference-guided Image Inpainting by Merging Multiple Color and Spatial Transformations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c266/1yeM3LNZkru",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09925645",
"articleId": "1HCQTWI9XgY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09930144",
"articleId": "1HMOX2J2VMY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HMOWsNEA6I",
"name": "ttg555501-09928218s1-tvcg-3216712-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09928218s1-tvcg-3216712-mm.zip",
"extension": "zip",
"size": "235 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
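The record above contrasts metameric inpainting with the colour-based push-pull baseline named in its abstract. For readers unfamiliar with that baseline, below is a minimal, hypothetical NumPy sketch of push-pull hole filling (average the valid pixels downward, then fill holes from the coarser level upward); it illustrates the baseline only, not the paper's metameric method, and `push_pull` with its NaN-hole convention is an assumption of this sketch.

```python
# Hypothetical sketch of the push-pull baseline cited in the abstract,
# NOT the paper's metameric method. Holes are marked with NaN.
import numpy as np

def push_pull(img):
    """Fill NaN holes in a 2D image: push (coarsen), then pull (fill up)."""
    h, w = img.shape
    if min(h, w) <= 1:
        out = img.copy()
        out[np.isnan(out)] = 0.0            # fallback if everything is a hole
        return out
    # Push: 2x downsample, averaging only the valid (non-NaN) pixels.
    ph, pw = (h + 1) // 2, (w + 1) // 2
    pad = np.full((ph * 2, pw * 2), np.nan)
    pad[:h, :w] = img
    valid = ~np.isnan(pad)
    vals = np.where(valid, pad, 0.0)
    s = vals.reshape(ph, 2, pw, 2).sum(axis=(1, 3))
    c = valid.reshape(ph, 2, pw, 2).sum(axis=(1, 3))
    coarse = np.where(c > 0, s / np.maximum(c, 1), np.nan)
    coarse = push_pull(coarse)              # recurse until hole-free
    # Pull: fill remaining holes from the coarser level (nearest upsample).
    up = np.repeat(np.repeat(coarse, 2, axis=0), 2, axis=1)[:h, :w]
    out = img.copy()
    hole = np.isnan(out)
    out[hole] = up[hole]
    return out

img = np.random.rand(64, 64)
img[20:30, 20:40] = np.nan                   # simulated disocclusion
assert not np.isnan(push_pull(img)).any()
```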
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HCQTWI9XgY",
"doi": "10.1109/TVCG.2022.3216211",
"abstract": "The natural locomotion interface is critical to the development of many VR applications. For household VR applications, there are two basic requirements: natural immersive experience and minimized space occupation. The existing locomotion strategies generally do not simultaneously satisfy these two requirements well. This paper presents a novel omnidirectional treadmill (ODT) system named Hex-Core-MK1 (HCMK1). By implementing two kinds of mirror-symmetrical spiral rollers to generate the omnidirectional velocity field, this proposed system is capable of providing real walking experiences with a full-degree of freedom in an area as small as 1.76 m<sup>2</sup>, while delivering great advantages over several existing ODT systems in terms of weight, volume, latency and dynamic performance. Compared with the sizes of Infinadeck and HCP, the two best motor-driven ODTs so far, the 8 cm height of HCMK1 is only 20% of Infinadeck and 50% of HCP. In addition, HCMK1 is a lightweight device weighing only 110 kg, which provides possibilities for further expanding VR scenarios, such as terrain simulation. The system latency of HCMK1 is only 9ms. The experiments show that HCMK1 can deliver a starting acceleration of 16.00 m/s<sup>2</sup> and a braking acceleration of 30.00 m/s<sup>2</sup>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The natural locomotion interface is critical to the development of many VR applications. For household VR applications, there are two basic requirements: natural immersive experience and minimized space occupation. The existing locomotion strategies generally do not simultaneously satisfy these two requirements well. This paper presents a novel omnidirectional treadmill (ODT) system named Hex-Core-MK1 (HCMK1). By implementing two kinds of mirror-symmetrical spiral rollers to generate the omnidirectional velocity field, this proposed system is capable of providing real walking experiences with a full-degree of freedom in an area as small as 1.76 m<sup>2</sup>, while delivering great advantages over several existing ODT systems in terms of weight, volume, latency and dynamic performance. Compared with the sizes of Infinadeck and HCP, the two best motor-driven ODTs so far, the 8 cm height of HCMK1 is only 20% of Infinadeck and 50% of HCP. In addition, HCMK1 is a lightweight device weighing only 110 kg, which provides possibilities for further expanding VR scenarios, such as terrain simulation. The system latency of HCMK1 is only 9ms. The experiments show that HCMK1 can deliver a starting acceleration of 16.00 m/s<sup>2</sup> and a braking acceleration of 30.00 m/s<sup>2</sup>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The natural locomotion interface is critical to the development of many VR applications. For household VR applications, there are two basic requirements: natural immersive experience and minimized space occupation. The existing locomotion strategies generally do not simultaneously satisfy these two requirements well. This paper presents a novel omnidirectional treadmill (ODT) system named Hex-Core-MK1 (HCMK1). By implementing two kinds of mirror-symmetrical spiral rollers to generate the omnidirectional velocity field, this proposed system is capable of providing real walking experiences with a full-degree of freedom in an area as small as 1.76 m2, while delivering great advantages over several existing ODT systems in terms of weight, volume, latency and dynamic performance. Compared with the sizes of Infinadeck and HCP, the two best motor-driven ODTs so far, the 8 cm height of HCMK1 is only 20% of Infinadeck and 50% of HCP. In addition, HCMK1 is a lightweight device weighing only 110 kg, which provides possibilities for further expanding VR scenarios, such as terrain simulation. The system latency of HCMK1 is only 9ms. The experiments show that HCMK1 can deliver a starting acceleration of 16.00 m/s2 and a braking acceleration of 30.00 m/s2.",
"title": "Strolling in Room-Scale VR: Hex-Core-MK1 Omnidirectional Treadmill",
"normalizedTitle": "Strolling in Room-Scale VR: Hex-Core-MK1 Omnidirectional Treadmill",
"fno": "09925645",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Aerospace Electronics",
"User Experience",
"Delays",
"Torso",
"Spirals",
"Space Technology",
"Omnidirectional Treadmill",
"Locomotion Devices",
"Locomotion Interfaces",
"Room Scale VR"
],
"authors": [
{
"givenName": "Ziyao",
"surname": "Wang",
"fullName": "Ziyao Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chiyi",
"surname": "Liu",
"fullName": "Chiyi Liu",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jialiang",
"surname": "Chen",
"fullName": "Jialiang Chen",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yao",
"surname": "Yao",
"fullName": "Yao Yao",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dazheng",
"surname": "Fang",
"fullName": "Dazheng Fang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiyi",
"surname": "Shi",
"fullName": "Zhiyi Shi",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Yan",
"fullName": "Rui Yan",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiye",
"surname": "Wang",
"fullName": "Yiye Wang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "KanJian",
"surname": "Zhang",
"fullName": "KanJian Zhang",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hai",
"surname": "Wang",
"fullName": "Hai Wang",
"affiliation": "Saint Mary's University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haikun",
"surname": "Wei",
"fullName": "Haikun Wei",
"affiliation": "Southeast University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446177",
"title": "You Shall Not Pass: Non-Intrusive Feedback for Virtual Walls in VR Environments with Room-Scale Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446177/13bd1eSlyu1",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09761724",
"title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09761724/1CKMkLCKOSk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089561",
"title": "Real Walking in Place: HEX-CORE-PROTOTYPE Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089561/1jIxfncHjNe",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090608",
"title": "Towards an Affordance of Embodied Locomotion Interfaces in VR: How to Know How to Move?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090608/1jIxnjPP9Ti",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090549",
"title": "Omnidirectional Motion Input: The Basis of Natural Interaction in Room-Scale Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090549/1jIxttGQGKk",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a346",
"title": "Spring Stepper: A Seated VR Locomotion Controller",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a346/1oZBBswUSzK",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09629264",
"title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09629264/1yXvJdO9qaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09920542",
"articleId": "1HxSntuIBnW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09928218",
"articleId": "1HJuJYF342Y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HGJ5LLMdHi",
"name": "ttg555501-09925645s1-supp3-3216211.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09925645s1-supp3-3216211.mp4",
"extension": "mp4",
"size": "94.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1HGJ6lniEh2",
"name": "ttg555501-09925645s1-supp1-3216211.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09925645s1-supp1-3216211.mp4",
"extension": "mp4",
"size": "14.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1HGJ5DJCcq4",
"name": "ttg555501-09925645s1-supp2-3216211.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09925645s1-supp2-3216211.mp4",
"extension": "mp4",
"size": "34.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
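The treadmill record above reports a 9 ms system latency, a 16.00 m/s² starting acceleration, and a 30.00 m/s² braking acceleration. As an illustrative sanity check (not from the paper), elementary kinematics bounds how far a walker drifts before the belt cancels their velocity; the 1.4 m/s walking speed is an assumed typical value.

```python
# Back-of-envelope check using the figures quoted in the abstract.
# The walking speed is an assumption; nothing here is from the paper.
v_walk = 1.4       # m/s, assumed walking speed
a_start = 16.0     # m/s^2, reported starting acceleration
a_brake = 30.0     # m/s^2, reported braking acceleration
latency = 0.009    # s, reported system latency

def drift(v, a, t_lag):
    # Drift during sensing latency plus the ramp until the belt matches
    # the user's speed: d = v*t_lag + v^2 / (2a).
    return v * t_lag + v**2 / (2.0 * a)

print(f"start-up drift ~ {drift(v_walk, a_start, latency):.3f} m")  # ~0.074 m
print(f"braking drift  ~ {drift(v_walk, a_brake, latency):.3f} m")  # ~0.045 m
# Both are well under the ~1.3 m side length of a 1.76 m^2 platform.
```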
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HxSlQBsfYc",
"doi": "10.1109/TVCG.2022.3214821",
"abstract": "The objective of this work is to develop error-bounded lossy compression methods to preserve topological features in 2D and 3D vector fields. Specifically, we explore the preservation of critical points in piecewise linear and bilinear vector fields. We define the preservation of critical points as, without any false positive, false negative, or false type in the decompressed data, (1) keeping each critical point in its original cell and (2) retaining the type of each critical point (e.g., saddle and attracting node). The key to our method is to adapt a vertex-wise error bound for each grid point and to compress input data together with the error bound field using a modified lossy compressor. Our compression algorithm can be also embarrassingly parallelized for large data handling and in situ processing. We benchmark our method by comparing it with existing lossy compressors in terms of false positive/negative/type rates, compression ratio, and various vector field visualizations with several scientific applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The objective of this work is to develop error-bounded lossy compression methods to preserve topological features in 2D and 3D vector fields. Specifically, we explore the preservation of critical points in piecewise linear and bilinear vector fields. We define the preservation of critical points as, without any false positive, false negative, or false type in the decompressed data, (1) keeping each critical point in its original cell and (2) retaining the type of each critical point (e.g., saddle and attracting node). The key to our method is to adapt a vertex-wise error bound for each grid point and to compress input data together with the error bound field using a modified lossy compressor. Our compression algorithm can be also embarrassingly parallelized for large data handling and in situ processing. We benchmark our method by comparing it with existing lossy compressors in terms of false positive/negative/type rates, compression ratio, and various vector field visualizations with several scientific applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The objective of this work is to develop error-bounded lossy compression methods to preserve topological features in 2D and 3D vector fields. Specifically, we explore the preservation of critical points in piecewise linear and bilinear vector fields. We define the preservation of critical points as, without any false positive, false negative, or false type in the decompressed data, (1) keeping each critical point in its original cell and (2) retaining the type of each critical point (e.g., saddle and attracting node). The key to our method is to adapt a vertex-wise error bound for each grid point and to compress input data together with the error bound field using a modified lossy compressor. Our compression algorithm can be also embarrassingly parallelized for large data handling and in situ processing. We benchmark our method by comparing it with existing lossy compressors in terms of false positive/negative/type rates, compression ratio, and various vector field visualizations with several scientific applications.",
"title": "Toward Feature-Preserving Vector Field Compression",
"normalizedTitle": "Toward Feature-Preserving Vector Field Compression",
"fno": "09920175",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Topology",
"Iterative Methods",
"Data Visualization",
"Image Coding",
"Error Correction",
"Jacobian Matrices",
"Critical Points",
"Lossy Compression",
"Vector Field Visualization"
],
"authors": [
{
"givenName": "Xin",
"surname": "Liang",
"fullName": "Xin Liang",
"affiliation": "Department of Computer Science, Missouri University of Science and Technology, Rolla, MO, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng",
"surname": "Di",
"fullName": "Sheng Di",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Lemont, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Franck",
"surname": "Cappello",
"fullName": "Franck Cappello",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Lemont, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mukund",
"surname": "Raj",
"fullName": "Mukund Raj",
"affiliation": "Stanley Center for Psychiatric Research, Broad Institute of MIT and Harvard, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunhui",
"surname": "Liu",
"fullName": "Chunhui Liu",
"affiliation": "Department of Mathematics, Kyoto University, Kyoto, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kenji",
"surname": "Ono",
"fullName": "Kenji Ono",
"affiliation": "Department of Informatics, Kyushu University, Fukuoka, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zizhong",
"surname": "Chen",
"fullName": "Zizhong Chen",
"affiliation": "Department of Computer Science and Engineering, University of California, Riverside, Riverside, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tom",
"surname": "Peterka",
"fullName": "Tom Peterka",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Lemont, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanqi",
"surname": "Guo",
"fullName": "Hanqi Guo",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2012/4905/0/4905a509",
"title": "Community Preserving Lossy Compression of Social Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2012/4905a509/12OmNBEpnwf",
"parentPublication": {
"id": "proceedings/icdm/2012/4905/0",
"title": "2012 IEEE 12th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2011/926/0/06115508",
"title": "Efficient temporal compression in wireless sensor networks",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2011/06115508/12OmNroij1Y",
"parentPublication": {
"id": "proceedings/lcn/2011/926/0",
"title": "2011 IEEE 36th Conference on Local Computer Networks",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2018/1424/0/142401a046",
"title": "Topologically Controlled Lossy Compression",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2018/142401a046/12OmNx8fi9c",
"parentPublication": {
"id": "proceedings/pacificvis/2018/1424/0",
"title": "2018 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2019/02/08421751",
"title": "Efficient Lossy Compression for Scientific Data Based on Pointwise Relative Error Bound",
"doi": null,
"abstractUrl": "/journal/td/2019/02/08421751/17D45XuDNFo",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a368",
"title": "Lost in Compression: the Impact of Lossy Image Compression on Variable Size Object Detection within Infrared Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a368/1G577zm83ZK",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/drbsd/2022/6337/0/633700a044",
"title": "Understanding Impact of Lossy Compression on Derivative-related Metrics in Scientific Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/drbsd/2022/633700a044/1KhjGps56H6",
"parentPublication": {
"id": "proceedings/drbsd/2022/6337/0",
"title": "2022 IEEE/ACM 8th International Workshop on Data Analysis and Reduction for Big Scientific Data (DRBSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086223",
"title": "Toward Feature-Preserving 2D and 3D Vector Field Compression",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086223/1kuHmCb4z2E",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2020/6876/0/09139812",
"title": "FRaZ: A Generic High-Fidelity Fixed-Ratio Lossy Compression Framework for Scientific Floating-point Data",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2020/09139812/1lss8nVQuis",
"parentPublication": {
"id": "proceedings/ipdps/2020/6876/0",
"title": "2020 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g637",
"title": "Learning Better Lossless Compression Using Lossy Compression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g637/1m3oqekUl5S",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/correctness/2020/1044/0/104400a001",
"title": "Correctness-preserving Compression of Datasets and Neural Network Models",
"doi": null,
"abstractUrl": "/proceedings-article/correctness/2020/104400a001/1pLJKSCgMpi",
"parentPublication": {
"id": "proceedings/correctness/2020/1044/0",
"title": "2020 IEEE/ACM 4th International Workshop on Software Correctness for HPC Applications (Correctness)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09919390",
"articleId": "1HsTAyyKsne",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09920233",
"articleId": "1HxSmJQqfqE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
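The compression record above defines feature preservation as keeping each critical point in its cell and retaining its type (e.g., saddle vs. attracting node). As a minimal illustration of the type property the compressor must not change, the hypothetical sketch below classifies a 2D critical point from the eigenvalues of its Jacobian; it is not the authors' compression algorithm, and `classify` is an assumed helper name.

```python
# Hypothetical illustration of critical-point *types* in 2D vector fields,
# the attribute the compressor is required to preserve. Not the paper's code.
import numpy as np

def classify(J, tol=1e-12):
    """Classify a 2D critical point by the eigenvalues of its Jacobian J."""
    ev = np.linalg.eigvals(J)
    re = ev.real
    if re[0] * re[1] < -tol:                 # real eigenvalues, opposite signs
        return "saddle"
    kind = "attracting" if re.max() < 0 else "repelling"
    shape = "focus" if np.abs(ev.imag).max() > tol else "node"
    return f"{kind} {shape}"

print(classify(np.array([[-2.0, 0.0], [0.0, -1.0]])))   # attracting node
print(classify(np.array([[ 2.0, 0.0], [0.0, -1.0]])))   # saddle
print(classify(np.array([[-1.0, -1.0], [1.0, -1.0]])))  # attracting focus
```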
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HxSmJQqfqE",
"doi": "10.1109/TVCG.2022.3214836",
"abstract": "Selecting views is one of the most common but overlooked procedures in topics related to 3D scenes. Typically, existing applications and researchers manually select views through a trial-and-error process or “preset” a direction, such as the top-down views. For example, literature for scene synthesis requires views for visualizing scenes. Research on panorama and VR also require initial placements for cameras, etc. This paper presents SceneViewer, an integrated system for automatic view selections. Our system is achieved by applying rules of interior photography, which guides potential views and seeks better views. Through experiments and applications, we show the potentiality and novelty of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Selecting views is one of the most common but overlooked procedures in topics related to 3D scenes. Typically, existing applications and researchers manually select views through a trial-and-error process or “preset” a direction, such as the top-down views. For example, literature for scene synthesis requires views for visualizing scenes. Research on panorama and VR also require initial placements for cameras, etc. This paper presents SceneViewer, an integrated system for automatic view selections. Our system is achieved by applying rules of interior photography, which guides potential views and seeks better views. Through experiments and applications, we show the potentiality and novelty of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Selecting views is one of the most common but overlooked procedures in topics related to 3D scenes. Typically, existing applications and researchers manually select views through a trial-and-error process or “preset” a direction, such as the top-down views. For example, literature for scene synthesis requires views for visualizing scenes. Research on panorama and VR also require initial placements for cameras, etc. This paper presents SceneViewer, an integrated system for automatic view selections. Our system is achieved by applying rules of interior photography, which guides potential views and seeks better views. Through experiments and applications, we show the potentiality and novelty of the proposed method.",
"title": "SceneViewer: Automating Residential Photography in Virtual Environments",
"normalizedTitle": "SceneViewer: Automating Residential Photography in Virtual Environments",
"fno": "09920233",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Photography",
"Shape",
"Probes",
"Solid Modeling",
"Rendering Computer Graphics",
"3 D Interior Scene",
"Interior Photography",
"View Selection"
],
"authors": [
{
"givenName": "Shao-Kui",
"surname": "Zhang",
"fullName": "Shao-Kui Zhang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hou",
"surname": "Tam",
"fullName": "Hou Tam",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi-Xiao",
"surname": "Li",
"fullName": "Yi-Xiao Li",
"affiliation": "Academy of Arts & Design, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tai-Jiang",
"surname": "Mu",
"fullName": "Tai-Jiang Mu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2004/2244/0/01410494",
"title": "An efficient image-based virtual tour system",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410494/12OmNqG0SWz",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552858",
"title": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552858/12OmNs0TKW6",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2017/2162/0/2162a187",
"title": "The Design and Implementation of Harbor Panoramic Browsing System Based on Image Based Rendering Technology",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2017/2162a187/12OmNxTVU0S",
"parentPublication": {
"id": "proceedings/dcabes/2017/2162/0",
"title": "2017 16th International Symposium on Distributed Computing and Applications to Business, Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a208",
"title": "Streaming Location-Based Panorama Videos into Augmented Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a208/12OmNzwZ6x8",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699207",
"title": "Browsing Spatial Photography Using Augmented Models",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699207/19F1RMZ48XC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900b286",
"title": "NTIRE 2022 Challenge on Night Photography Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900b286/1G57anvT9XW",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8398",
"title": "Neural Point Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093478",
"title": "Style Transfer for Light Field Photography",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093478/1jPbzdrvv8Y",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i025",
"title": "3D Photography Using Context-Aware Layered Depth Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i025/1m3opTjsjNC",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a145",
"title": "3D Photography with One-shot Portrait Relighting",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a145/1yfxMqwo4Le",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09920175",
"articleId": "1HxSlQBsfYc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09920664",
"articleId": "1HxSn2tq3Ha",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HCQUih6DUQ",
"name": "ttg555501-09920233s1-supp2-3214836.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09920233s1-supp2-3214836.pdf",
"extension": "pdf",
"size": "9.11 MB",
"__typename": "WebExtraType"
},
{
"id": "1HCQULKybkc",
"name": "ttg555501-09920233s1-supp3-3214836.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09920233s1-supp3-3214836.mp4",
"extension": "mp4",
"size": "211 MB",
"__typename": "WebExtraType"
},
{
"id": "1HCQVmdINu8",
"name": "ttg555501-09920233s1-supp1-3214836.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09920233s1-supp1-3214836.pdf",
"extension": "pdf",
"size": "32.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
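The SceneViewer record above describes selecting views by applying rules of interior photography to guide and rank candidate views. The sketch below is a purely hypothetical ranking loop over 2D camera poses: the `view_score` heuristics (an on-axis framing term and a mid-range distance term) and all weights are invented for illustration and do not reproduce the paper's rules.

```python
# Hypothetical view-ranking loop; the heuristics and weights are invented
# and stand in for the interior-photography rules described in the abstract.
import math

def view_score(cam, subjects, room_diag):
    cx, cy, yaw = cam                        # 2D camera pose (m, m, rad)
    score = 0.0
    for sx, sy in subjects:                  # key furniture positions
        dx, dy = sx - cx, sy - cy
        # Reward subjects framed near the view axis.
        off = (math.atan2(dy, dx) - yaw) % (2 * math.pi)
        off = min(off, 2 * math.pi - off)
        score += max(0.0, 1.0 - off / (math.pi / 4))
        # Reward mid-range distance: neither wall-hugging nor across the room.
        dist = math.hypot(dx, dy)
        score += 1.0 - abs(dist / room_diag - 0.5) * 2.0
    return score

cams = [(1.0, 1.0, 0.8), (3.0, 2.0, 3.5), (0.5, 4.0, -0.7)]
subjects = [(2.5, 2.5), (4.0, 1.0)]
print(max(cams, key=lambda c: view_score(c, subjects, room_diag=6.0)))
```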
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HxSntuIBnW",
"doi": "10.1109/TVCG.2022.3214420",
"abstract": "We present a novel technique for hierarchical super resolution (SR) with neural networks (NNs), which upscales volumetric data represented with an octree data structure to a high-resolution uniform grid with minimal seam artifacts on octree node boundaries. Our method uses existing state-of-the-art SR models and adds flexibility to upscale input data with varying levels of detail across the domain, instead of only uniform grid data that are supported in previous approaches. The key is to use a hierarchy of SR NNs, each trained to perform <inline-formula><tex-math notation=\"LaTeX\">Z_$2\\times$_Z</tex-math></inline-formula> SR between two levels of detail, with a hierarchical SR algorithm that minimizes seam artifacts by starting from the coarsest level of detail and working up. We show that our hierarchical approach outperforms baseline interpolation and hierarchical upscaling methods, and demonstrate the usefulness of our proposed approach across three use cases including data reduction using hierarchical downsampling+SR instead of uniform downsampling+SR, computation savings for hierarchical finite-time Lyapunov exponent field calculation, and super-resolving low-resolution simulation results for a high-resolution approximation visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel technique for hierarchical super resolution (SR) with neural networks (NNs), which upscales volumetric data represented with an octree data structure to a high-resolution uniform grid with minimal seam artifacts on octree node boundaries. Our method uses existing state-of-the-art SR models and adds flexibility to upscale input data with varying levels of detail across the domain, instead of only uniform grid data that are supported in previous approaches. The key is to use a hierarchy of SR NNs, each trained to perform <inline-formula><tex-math notation=\"LaTeX\">$2\\times$</tex-math></inline-formula> SR between two levels of detail, with a hierarchical SR algorithm that minimizes seam artifacts by starting from the coarsest level of detail and working up. We show that our hierarchical approach outperforms baseline interpolation and hierarchical upscaling methods, and demonstrate the usefulness of our proposed approach across three use cases including data reduction using hierarchical downsampling+SR instead of uniform downsampling+SR, computation savings for hierarchical finite-time Lyapunov exponent field calculation, and super-resolving low-resolution simulation results for a high-resolution approximation visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel technique for hierarchical super resolution (SR) with neural networks (NNs), which upscales volumetric data represented with an octree data structure to a high-resolution uniform grid with minimal seam artifacts on octree node boundaries. Our method uses existing state-of-the-art SR models and adds flexibility to upscale input data with varying levels of detail across the domain, instead of only uniform grid data that are supported in previous approaches. The key is to use a hierarchy of SR NNs, each trained to perform - SR between two levels of detail, with a hierarchical SR algorithm that minimizes seam artifacts by starting from the coarsest level of detail and working up. We show that our hierarchical approach outperforms baseline interpolation and hierarchical upscaling methods, and demonstrate the usefulness of our proposed approach across three use cases including data reduction using hierarchical downsampling+SR instead of uniform downsampling+SR, computation savings for hierarchical finite-time Lyapunov exponent field calculation, and super-resolving low-resolution simulation results for a high-resolution approximation visualization.",
"title": "Deep Hierarchical Super Resolution for Scientific Data",
"normalizedTitle": "Deep Hierarchical Super Resolution for Scientific Data",
"fno": "09920542",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Spatial Resolution",
"Rendering Computer Graphics",
"Octrees",
"Data Models",
"Computational Modeling",
"Interpolation",
"Isosurfaces"
],
"authors": [
{
"givenName": "Skylar W.",
"surname": "Wurster",
"fullName": "Skylar W. Wurster",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanqi",
"surname": "Guo",
"fullName": "Hanqi Guo",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tom",
"surname": "Peterka",
"fullName": "Tom Peterka",
"affiliation": "Mathematics and Computer Science Division, Argonne National Laboratory, Lemont, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiayi",
"surname": "Xu",
"fullName": "Jiayi Xu",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2017/0733/0/0733b100",
"title": "Deep Wavelet Prediction for Image Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b100/12OmNwF0C7i",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b664",
"title": "Deep Back-Projection Networks for Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b664/17D45WKWnHX",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcat/2022/6090/0/609000a048",
"title": "Enhanced Deep Learning Super-Resolution for Bathymetry Data",
"doi": null,
"abstractUrl": "/proceedings-article/bdcat/2022/609000a048/1Lu4d5cQVi0",
"parentPublication": {
"id": "proceedings/bdcat/2022/6090/0",
"title": "2022 IEEE/ACM International Conference on Big Data Computing, Applications and Technologies (BDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08918030",
"title": "Volumetric Isosurface Rendering with Deep Learning-Based Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08918030/1fm1QUuzRAI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300b652",
"title": "Camera Lens Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300b652/1gyrgQ8SE2k",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600a505",
"title": "RUNet: A Robust UNet Architecture for Image Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600a505/1iTvgueXtQI",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b804",
"title": "Light Field Super-Resolution: A Benchmark",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b804/1iTvo7kjJFm",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2020/9325/0/09232612",
"title": "Fusion of Deep and Non-Deep Methods for Fast Super-Resolution of Satellite Images",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2020/09232612/1o56CWDsUCs",
"parentPublication": {
"id": "proceedings/bigmm/2020/9325/0",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09521710",
"title": "Toward Real-World Super-Resolution via Adaptive Downsampling Models",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09521710/1wkrnHyoWAw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a453",
"title": "KernelNet: A Blind Super-Resolution Kernel Estimation Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a453/1yVzRwbSFYk",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09920664",
"articleId": "1HxSn2tq3Ha",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09925645",
"articleId": "1HCQTWI9XgY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
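The hierarchical SR record above trains one 2x SR network per level-of-detail transition and applies them from the coarsest octree level upward. A minimal driver for that coarse-to-fine schedule might look as follows; the trained networks are stubbed here with nearest-neighbour upsampling, and the method's handling of mixed levels of detail and seam blending at octree node boundaries is omitted.

```python
# Minimal coarse-to-fine driver matching the abstract's description; each
# "model" below is a stub standing in for a trained 2x SR network.
import numpy as np

def nearest_2x(vol):
    # Stand-in for a trained 2x SR network: double every axis.
    for ax in range(vol.ndim):
        vol = np.repeat(vol, 2, axis=ax)
    return vol

def hierarchical_sr(coarsest, sr_models):
    """Upscale from the coarsest level to full resolution, one level at a time."""
    vol = coarsest
    for model in sr_models:                  # ordered coarse -> fine
        vol = model(vol)
    return vol

lod3 = np.random.rand(8, 8, 8)               # coarsest level of an octree
full = hierarchical_sr(lod3, [nearest_2x] * 3)
print(full.shape)                             # (64, 64, 64)
```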
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HxSn2tq3Ha",
"doi": "10.1109/TVCG.2022.3215070",
"abstract": "Declarative grammar is becoming an increasingly important technique for understanding visualization design spaces. The GoTreeScape system presented in the paper allows users to navigate and explore the vast design space implied by GoTree, a declarative grammar for visualizing tree structures. To provide an overview of the design space, GoTreeScape, which is based on an encoder-decoder architecture, projects the tree visualizations onto a 2D landscape. Significantly, this landscape takes the relationships between different design features into account. GoTreeScape also includes an exploratory framework that allows top-down, bottom-up, and hybrid modes of exploration to support the inherently undirected nature of exploratory searches. Two case studies demonstrate the diversity with which GoTreeScape expands the universe of designed tree visualizations for users. The source code associated with GoTreeScape is available at <uri>https://github.com/bitvis2021/gotreescape.</uri>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Declarative grammar is becoming an increasingly important technique for understanding visualization design spaces. The GoTreeScape system presented in the paper allows users to navigate and explore the vast design space implied by GoTree, a declarative grammar for visualizing tree structures. To provide an overview of the design space, GoTreeScape, which is based on an encoder-decoder architecture, projects the tree visualizations onto a 2D landscape. Significantly, this landscape takes the relationships between different design features into account. GoTreeScape also includes an exploratory framework that allows top-down, bottom-up, and hybrid modes of exploration to support the inherently undirected nature of exploratory searches. Two case studies demonstrate the diversity with which GoTreeScape expands the universe of designed tree visualizations for users. The source code associated with GoTreeScape is available at <uri>https://github.com/bitvis2021/gotreescape.</uri>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Declarative grammar is becoming an increasingly important technique for understanding visualization design spaces. The GoTreeScape system presented in the paper allows users to navigate and explore the vast design space implied by GoTree, a declarative grammar for visualizing tree structures. To provide an overview of the design space, GoTreeScape, which is based on an encoder-decoder architecture, projects the tree visualizations onto a 2D landscape. Significantly, this landscape takes the relationships between different design features into account. GoTreeScape also includes an exploratory framework that allows top-down, bottom-up, and hybrid modes of exploration to support the inherently undirected nature of exploratory searches. Two case studies demonstrate the diversity with which GoTreeScape expands the universe of designed tree visualizations for users. The source code associated with GoTreeScape is available at https://github.com/bitvis2021/gotreescape.",
"title": "GoTreeScape: Navigate and Explore the Tree Visualization Design Space",
"normalizedTitle": "GoTreeScape: Navigate and Explore the Tree Visualization Design Space",
"fno": "09920664",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Data Visualization",
"Layout",
"Space Exploration",
"Grammar",
"Navigation",
"Shape",
"Tree Visualization",
"Design Space Exploration",
"Deep Learning"
],
"authors": [
{
"givenName": "Guozheng",
"surname": "Li",
"fullName": "Guozheng Li",
"affiliation": "School of Computer Science and Technology, Beijing Institute of Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoru",
"surname": "Yuan",
"fullName": "Xiaoru Yuan",
"affiliation": "Key Laboratory of Machine Perception (Ministry of Education), School of AI, Peking University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/estimedia/2009/5169/0/05336816",
"title": "System-level MP-SoC design space exploration using tree visualization",
"doi": null,
"abstractUrl": "/proceedings-article/estimedia/2009/05336816/12OmNxecRQ4",
"parentPublication": {
"id": "proceedings/estimedia/2009/5169/0",
"title": "2009 IEEE/ACM/IFIP 7th Workshop on Embedded Systems for Real-Time Multimedia. ESTIMedia 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2014/4103/0/4103a094",
"title": "FacetScape: A Visualization for Exploring the Search Space",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a094/12OmNzVXNZB",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363377",
"title": "The Mobile Tree Browser: A Space Filling Information Visualization for Browsing Labelled Hierarchies on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363377/12OmNzaQoa1",
"parentPublication": {
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0",
"title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/03/08468065",
"title": "P4: Portable Parallel Processing Pipelines for Interactive Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/03/08468065/13HFz2XZAUp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440063",
"title": "A Declarative Grammar of Flexible Volume Visualization Pipelines",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440063/17D45XacGi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809730",
"title": "P5: Portable Progressive Parallel Processing Pipelines for Interactive Data Analysis and Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809730/1cHE2tYwF7a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222038",
"title": "Kyrix-S: Authoring Scalable Scatterplot Visualizations of Big Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222038/1nTq1lYLbEY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a131",
"title": "Encodable: Configurable Grammar for Visualization Components",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a131/1qRNXTuFymI",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557192",
"title": "Gosling: A Grammar-based Toolkit for Scalable and Interactive Genomics Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557192/1xlw1UFWxDa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09920233",
"articleId": "1HxSmJQqfqE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09920542",
"articleId": "1HxSntuIBnW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HzxbDFluGk",
"name": "ttg555501-09920664s1-supp1-3215070.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09920664s1-supp1-3215070.pdf",
"extension": "pdf",
"size": "16.4 MB",
"__typename": "WebExtraType"
},
{
"id": "1Hzx9D4Ro0E",
"name": "ttg555501-09920664s1-supp2-3215070.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09920664s1-supp2-3215070.mp4",
"extension": "mp4",
"size": "188 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HsTAyyKsne",
"doi": "10.1109/TVCG.2022.3214771",
"abstract": "Guidelines for color use in quantitative visualizations have strongly discouraged the use of rainbow colormaps, arguing instead for smooth designs that do not induce visual discontinuities or implicit color categories. However, the empirical evidence behind this argument has been mixed and, at times, even contradictory. In practice, rainbow colormaps are widely used, raising questions about the true utility or dangers of such designs. We study how color categorization impacts the interpretation of scalar fields. We first introduce an approach to detect latent categories in colormaps. We hypothesize that the appearance of color categories in scalar visualizations can be beneficial in that they enhance the perception of certain features, although at the cost of rendering other features less noticeable. In three crowdsourced experiments, we show that observers are more likely to discriminate global, distributional features when viewing colorful scales that induce categorization (e.g., rainbow or diverging schemes). Conversely, when seeing the same data through a less colorful representation, observers are more likely to report localized features defined by small variations in the data. Participants showed awareness of these different affordances, and exhibited bias for exploiting the more discriminating colormap, given a particular feature type. Our results demonstrate costs and benefits for rainbows (and similarly colorful schemes), suggesting that their complementary utility for analyzing scalar data should not be dismissed. In addition to explaining potentially valid uses of rainbow, our study provides actionable guidelines, including on when such designs can be more harmful than useful. Data and materials are available at <uri>https://osf.io/xjhtf</uri>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Guidelines for color use in quantitative visualizations have strongly discouraged the use of rainbow colormaps, arguing instead for smooth designs that do not induce visual discontinuities or implicit color categories. However, the empirical evidence behind this argument has been mixed and, at times, even contradictory. In practice, rainbow colormaps are widely used, raising questions about the true utility or dangers of such designs. We study how color categorization impacts the interpretation of scalar fields. We first introduce an approach to detect latent categories in colormaps. We hypothesize that the appearance of color categories in scalar visualizations can be beneficial in that they enhance the perception of certain features, although at the cost of rendering other features less noticeable. In three crowdsourced experiments, we show that observers are more likely to discriminate global, distributional features when viewing colorful scales that induce categorization (e.g., rainbow or diverging schemes). Conversely, when seeing the same data through a less colorful representation, observers are more likely to report localized features defined by small variations in the data. Participants showed awareness of these different affordances, and exhibited bias for exploiting the more discriminating colormap, given a particular feature type. Our results demonstrate costs and benefits for rainbows (and similarly colorful schemes), suggesting that their complementary utility for analyzing scalar data should not be dismissed. In addition to explaining potentially valid uses of rainbow, our study provides actionable guidelines, including on when such designs can be more harmful than useful. Data and materials are available at <uri>https://osf.io/xjhtf</uri>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Guidelines for color use in quantitative visualizations have strongly discouraged the use of rainbow colormaps, arguing instead for smooth designs that do not induce visual discontinuities or implicit color categories. However, the empirical evidence behind this argument has been mixed and, at times, even contradictory. In practice, rainbow colormaps are widely used, raising questions about the true utility or dangers of such designs. We study how color categorization impacts the interpretation of scalar fields. We first introduce an approach to detect latent categories in colormaps. We hypothesize that the appearance of color categories in scalar visualizations can be beneficial in that they enhance the perception of certain features, although at the cost of rendering other features less noticeable. In three crowdsourced experiments, we show that observers are more likely to discriminate global, distributional features when viewing colorful scales that induce categorization (e.g., rainbow or diverging schemes). Conversely, when seeing the same data through a less colorful representation, observers are more likely to report localized features defined by small variations in the data. Participants showed awareness of these different affordances, and exhibited bias for exploiting the more discriminating colormap, given a particular feature type. Our results demonstrate costs and benefits for rainbows (and similarly colorful schemes), suggesting that their complementary utility for analyzing scalar data should not be dismissed. In addition to explaining potentially valid uses of rainbow, our study provides actionable guidelines, including on when such designs can be more harmful than useful. Data and materials are available at https://osf.io/xjhtf",
"title": "Rainbow Colormaps: What are they <italic>good</italic> and <italic>bad</italic> for?",
"normalizedTitle": "Rainbow Colormaps: What are they good and bad for?",
"fno": "09919390",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Color Analysis",
"Task Analysis",
"Encoding",
"Costs",
"Observers",
"Standards",
"Sensitivity",
"Quantitative Color Encoding",
"Rainbow Colormaps",
"Scalar Fields",
"Perception"
],
"authors": [
{
"givenName": "Khairi",
"surname": "Reda",
"fullName": "Khairi Reda",
"affiliation": "Indiana University–Purdue University Indianapolis, Indianapolis, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/01/08017653",
"title": "The Good, the Bad, and the Ugly: A Theoretical Framework for the Assessment of Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017653/13rRUNvgz9W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/02/mcg2007020014",
"title": "Rainbow Color Map (Still) Considered Harmful",
"doi": null,
"abstractUrl": "/magazine/cg/2007/02/mcg2007020014/13rRUxYrbOE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/09/08637778",
"title": "Measuring the Effects of Scalar and Spherical Colormaps on Ensembles of DMRI Tubes",
"doi": null,
"abstractUrl": "/journal/tg/2020/09/08637778/17D45WrVgbO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933760",
"title": "Evaluating Gradient Perception in Color-Coded Scalar Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933760/1fTgHHw1pSM",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08939459",
"title": "The Making of Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08939459/1fZRynxLXGM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/05/09167329",
"title": "The Importance of Colormaps",
"doi": null,
"abstractUrl": "/magazine/cs/2020/05/09167329/1mhPJUptqpy",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216559",
"title": "A Testing Environment for Continuous Colormaps",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216559/1nJsOQFe8A8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222327",
"title": "Rainbows Revisited: Modeling Effective Colormap Design for Graphical Inference",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222327/1nTqMLwYD0A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/02/04118486",
"title": "Rainbow Color Map (Still) Considered Harmful",
"doi": null,
"abstractUrl": "/magazine/cg/2007/02/04118486/1oCjGn4Rpss",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09249052",
"title": "Rainbow Dash: Intuitiveness, Interpretability and Memorability of the Rainbow Color Scheme in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09249052/1ovEVPWDI4g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09916137",
"articleId": "1HojAjSAGNq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09920175",
"articleId": "1HxSlQBsfYc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HBHYun2dQk",
"name": "ttg555501-09919390s1-tvcg-3214771-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09919390s1-tvcg-3214771-mm.zip",
"extension": "zip",
"size": "4.18 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HojAjSAGNq",
"doi": "10.1109/TVCG.2022.3213565",
"abstract": "Composite visualization is a popular design strategy that represents complex datasets by integrating multiple visualizations in a meaningful and aesthetic layout, such as juxtaposition, overlay, and nesting. With this strategy, numerous novel designs have been proposed in visualization publications to accomplish various visual analytic tasks. However, there is a lack of understanding of design patterns of composite visualization, thus failing to provide holistic design space and concrete examples for practical use. In this paper, we opted to revisit the composite visualizations in IEEE VIS publications and answered what and how visualizations of different types are composed together. To achieve this, we first constructed a corpus of composite visualizations from the publications and analyzed common practices, such as the pattern distributions and co-occurrence of visualization types. From the analysis, we obtained insights into different design patterns on the utilities and their potential pros and cons. Furthermore, we discussed usage scenarios of our taxonomy and corpus and how future research on visualization composition can be conducted on the basis of this study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Composite visualization is a popular design strategy that represents complex datasets by integrating multiple visualizations in a meaningful and aesthetic layout, such as juxtaposition, overlay, and nesting. With this strategy, numerous novel designs have been proposed in visualization publications to accomplish various visual analytic tasks. However, there is a lack of understanding of design patterns of composite visualization, thus failing to provide holistic design space and concrete examples for practical use. In this paper, we opted to revisit the composite visualizations in IEEE VIS publications and answered what and how visualizations of different types are composed together. To achieve this, we first constructed a corpus of composite visualizations from the publications and analyzed common practices, such as the pattern distributions and co-occurrence of visualization types. From the analysis, we obtained insights into different design patterns on the utilities and their potential pros and cons. Furthermore, we discussed usage scenarios of our taxonomy and corpus and how future research on visualization composition can be conducted on the basis of this study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Composite visualization is a popular design strategy that represents complex datasets by integrating multiple visualizations in a meaningful and aesthetic layout, such as juxtaposition, overlay, and nesting. With this strategy, numerous novel designs have been proposed in visualization publications to accomplish various visual analytic tasks. However, there is a lack of understanding of design patterns of composite visualization, thus failing to provide holistic design space and concrete examples for practical use. In this paper, we opted to revisit the composite visualizations in IEEE VIS publications and answered what and how visualizations of different types are composed together. To achieve this, we first constructed a corpus of composite visualizations from the publications and analyzed common practices, such as the pattern distributions and co-occurrence of visualization types. From the analysis, we obtained insights into different design patterns on the utilities and their potential pros and cons. Furthermore, we discussed usage scenarios of our taxonomy and corpus and how future research on visualization composition can be conducted on the basis of this study.",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"normalizedTitle": "Revisiting the Design Patterns of Composite Visualizations",
"fno": "09916137",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Taxonomy",
"Task Analysis",
"Layout",
"Grammar",
"Bars",
"Datasets",
"Visual Analytics",
"Visualization Specification",
"Visualization Design"
],
"authors": [
{
"givenName": "Dazhen",
"surname": "Deng",
"fullName": "Dazhen Deng",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weiwei",
"surname": "Cui",
"fullName": "Weiwei Cui",
"affiliation": "Microsoft Research Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiyu",
"surname": "Meng",
"fullName": "Xiyu Meng",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mengye",
"surname": "Xu",
"fullName": "Mengye Xu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu",
"surname": "Liao",
"fullName": "Yu Liao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haidong",
"surname": "Zhang",
"fullName": "Haidong Zhang",
"affiliation": "Microsoft Research Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2017/5738/0/08031580",
"title": "Interaction+: Interaction enhancement for web-based visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031580/12OmNyQ7FJe",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08354901",
"title": "Task-Based Effectiveness of Basic Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08354901/13rRUwd9CLU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122679",
"title": "Design Considerations for Optimizing Storyline Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122679/13rRUwhHcQR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440836",
"title": "Dynamic Composite Data Physicalization Using Wheeled Micro-Robots",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440836/17D45WWzW3c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2018/4235/0/08506578",
"title": "Comparative Visualizations through Parameterization and Variability",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2018/08506578/17D45WaTki5",
"parentPublication": {
"id": "proceedings/vlhcc/2018/4235/0",
"title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09984953",
"title": "VISAtlas: An Image-based Exploration and Query System for Large Visualization Collections via Neural Image Embedding",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09984953/1J6d2SwfUT6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/05/08744242",
"title": "Data2Vis: Automatic Generation of Data Visualizations Using Sequence-to-Sequence Recurrent Neural Networks",
"doi": null,
"abstractUrl": "/magazine/cg/2019/05/08744242/1cFV5domibu",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2019/5227/0/522700a084",
"title": "Comparing the Effectiveness of Visualizations of Different Data Distributions",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2019/522700a084/1fHloum4ISY",
"parentPublication": {
"id": "proceedings/sibgrapi/2019/5227/0",
"title": "2019 32nd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09350177",
"title": "Net2Vis – A Visual Grammar for Automatically Generating Publication-Tailored CNN Architecture Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09350177/1r3l972fCk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09916138",
"articleId": "1HojA9hKTO8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09919390",
"articleId": "1HsTAyyKsne",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HojA9hKTO8",
"doi": "10.1109/TVCG.2022.3213514",
"abstract": "In developing virtual acoustic environments, it is important to understand the relationship between the computation cost and the perceptual significance of the resultant numerical error. In this paper, we propose a quality criterion that evaluates the error significance of path-tracing-based sound propagation simulators. We present an analytical formula that estimates the error signal power spectrum. With this spectrum estimation, we can use a modified Zwicker's loudness model to calculate the relative loudness of the error signal masked by the ideal output. Our experimental results show that the proposed criterion can explain the human perception of simulation error in a variety of cases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In developing virtual acoustic environments, it is important to understand the relationship between the computation cost and the perceptual significance of the resultant numerical error. In this paper, we propose a quality criterion that evaluates the error significance of path-tracing-based sound propagation simulators. We present an analytical formula that estimates the error signal power spectrum. With this spectrum estimation, we can use a modified Zwicker's loudness model to calculate the relative loudness of the error signal masked by the ideal output. Our experimental results show that the proposed criterion can explain the human perception of simulation error in a variety of cases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In developing virtual acoustic environments, it is important to understand the relationship between the computation cost and the perceptual significance of the resultant numerical error. In this paper, we propose a quality criterion that evaluates the error significance of path-tracing-based sound propagation simulators. We present an analytical formula that estimates the error signal power spectrum. With this spectrum estimation, we can use a modified Zwicker's loudness model to calculate the relative loudness of the error signal masked by the ideal output. Our experimental results show that the proposed criterion can explain the human perception of simulation error in a variety of cases.",
"title": "A Psychoacoustic Quality Criterion for Path-Traced Sound Propagation",
"normalizedTitle": "A Psychoacoustic Quality Criterion for Path-Traced Sound Propagation",
"fno": "09916138",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Signal To Noise Ratio",
"Psychoacoustic Models",
"Solid Modeling",
"Mathematical Models",
"Band Pass Filters",
"Spectral Analysis",
"Resonant Frequency",
"Path Tracing",
"Psychoacoustics",
"Sound Simulation",
"Virtual Reality"
],
"authors": [
{
"givenName": "Chunxiao",
"surname": "Cao",
"fullName": "Chunxiao Cao",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zili",
"surname": "An",
"fullName": "Zili An",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhong",
"surname": "Ren",
"fullName": "Zhong Ren",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dinesh",
"surname": "Manocha",
"fullName": "Dinesh Manocha",
"affiliation": "Department of Computer Science, University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Zhou",
"fullName": "Kun Zhou",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icca/2003/7777/0/01595165",
"title": "A Nonlinear Control Scheme Applied to Track-Seeking in Hard Disk Drive Dual Stage Servo System",
"doi": null,
"abstractUrl": "/proceedings-article/icca/2003/01595165/12OmNAYGlED",
"parentPublication": {
"id": "proceedings/icca/2003/7777/0",
"title": "4th International Conference on Control and Automation. Final Program and Book of Abstracts",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1989/1933/0/00072473",
"title": "Upper bound frequencies of two dimensional signals",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1989/00072473/12OmNAYXWFC",
"parentPublication": {
"id": "proceedings/ssst/1989/1933/0",
"title": "1989 The Twenty-First Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issmd/2004/8612/0/01689552",
"title": "Power spectral analysis of heart rate variability of myocardial infarcted patients (non-invasive method)",
"doi": null,
"abstractUrl": "/proceedings-article/issmd/2004/01689552/12OmNAkniUd",
"parentPublication": {
"id": "proceedings/issmd/2004/8612/0",
"title": "2004 2nd IEEE/EMBS International Summer School on Medical Devices and Biosensors",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2017/6664/0/08279747",
"title": "Classification of P300 in EEG signals for disable subjects using singular spectrum analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279747/12OmNs0TKKg",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icwmc/2009/3750/0/3750a122",
"title": "Average Collision Number Criterion for TH-UWB Code Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icwmc/2009/3750a122/12OmNwseEWq",
"parentPublication": {
"id": "proceedings/icwmc/2009/3750/0",
"title": "Wireless and Mobile Communications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1996/7352/0/73520115",
"title": "A Necessary and Sufficient Stability Criterion for Linear Time-Varying Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1996/73520115/12OmNxETacc",
"parentPublication": {
"id": "proceedings/ssst/1996/7352/0",
"title": "Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/1/4077a655",
"title": "Analysis of Feature Extraction Criterion Function Maximum in Nonlinear Multi-layer Feedforward Neural Networks for Pattern Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077a655/12OmNxw5BnL",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2011/9222/0/05772408",
"title": "Formant Synthesis of Haegeum: A Sound Analysis/Synthesis System Using Cepstral Envelope",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2011/05772408/12OmNy2rS2i",
"parentPublication": {
"id": "proceedings/icisa/2011/9222/0",
"title": "2011 International Conference on Information Science and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326150",
"title": "SNR-dependent non-uniform spectral compression for noisy speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326150/12OmNzVXNRr",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2012/01/ttp2012010158",
"title": "Angular Embedding: A Robust Quadratic Criterion",
"doi": null,
"abstractUrl": "/journal/tp/2012/01/ttp2012010158/13rRUy0HYSJ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09911682",
"articleId": "1HeiWQWKlTG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09916137",
"articleId": "1HojAjSAGNq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HeiWQWKlTG",
"doi": "10.1109/TVCG.2022.3212089",
"abstract": "We investigate how underfoot vibrotactile feedback can be used to increase the impression of walking and embodiment of static users represented by a first- or third-person avatar. We designed a multi-sensory setup involving avatar displayed on an HMD, and a set of vibrotactile effects displayed at every footstep. In a first study (N = 44), we compared the impression of walking in 3 vibrotactile conditions : 1) with a ”constant” vibrotactile rendering reproducing simple contact information, 2) with a more sophisticated ”phase-based” vibrotactile rendering the successive contacts of a walking cycle and 3) without vibrotactile feedback. The results show that overall both constant and phase-based rendering significantly improve the impression of walking in first and third-person perspective. Interestingly, the more realistic phase-based rendering seems to increase significantly the impression of walking in the third-person condition, but not in the first-person condition. In a second study (N=28), we evaluated the embodiment towards first- and third-person avatar while receiving no vibrotactile feedback or by receiving vibrotactile feedback. The results show that vibrotactile feedback improves embodiment in both perspectives of the avatar. Taken together, our results support the use of vibrotactile feedback when users observe first- and third-person avatar. They also suggest that constant and phase-based rendering could be used with first-person avatar and support the use of phase-based rendering with third-person avatar. They provide valuable insight for stimulations in any VR applications in which the impression of walking is prominent such as for virtual visits, walking rehabilitation, video games, etc.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We investigate how underfoot vibrotactile feedback can be used to increase the impression of walking and embodiment of static users represented by a first- or third-person avatar. We designed a multi-sensory setup involving avatar displayed on an HMD, and a set of vibrotactile effects displayed at every footstep. In a first study (N = 44), we compared the impression of walking in 3 vibrotactile conditions : 1) with a ”constant” vibrotactile rendering reproducing simple contact information, 2) with a more sophisticated ”phase-based” vibrotactile rendering the successive contacts of a walking cycle and 3) without vibrotactile feedback. The results show that overall both constant and phase-based rendering significantly improve the impression of walking in first and third-person perspective. Interestingly, the more realistic phase-based rendering seems to increase significantly the impression of walking in the third-person condition, but not in the first-person condition. In a second study (N=28), we evaluated the embodiment towards first- and third-person avatar while receiving no vibrotactile feedback or by receiving vibrotactile feedback. The results show that vibrotactile feedback improves embodiment in both perspectives of the avatar. Taken together, our results support the use of vibrotactile feedback when users observe first- and third-person avatar. They also suggest that constant and phase-based rendering could be used with first-person avatar and support the use of phase-based rendering with third-person avatar. They provide valuable insight for stimulations in any VR applications in which the impression of walking is prominent such as for virtual visits, walking rehabilitation, video games, etc.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We investigate how underfoot vibrotactile feedback can be used to increase the impression of walking and embodiment of static users represented by a first- or third-person avatar. We designed a multi-sensory setup involving avatar displayed on an HMD, and a set of vibrotactile effects displayed at every footstep. In a first study (N = 44), we compared the impression of walking in 3 vibrotactile conditions : 1) with a ”constant” vibrotactile rendering reproducing simple contact information, 2) with a more sophisticated ”phase-based” vibrotactile rendering the successive contacts of a walking cycle and 3) without vibrotactile feedback. The results show that overall both constant and phase-based rendering significantly improve the impression of walking in first and third-person perspective. Interestingly, the more realistic phase-based rendering seems to increase significantly the impression of walking in the third-person condition, but not in the first-person condition. In a second study (N=28), we evaluated the embodiment towards first- and third-person avatar while receiving no vibrotactile feedback or by receiving vibrotactile feedback. The results show that vibrotactile feedback improves embodiment in both perspectives of the avatar. Taken together, our results support the use of vibrotactile feedback when users observe first- and third-person avatar. They also suggest that constant and phase-based rendering could be used with first-person avatar and support the use of phase-based rendering with third-person avatar. They provide valuable insight for stimulations in any VR applications in which the impression of walking is prominent such as for virtual visits, walking rehabilitation, video games, etc.",
"title": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar",
"normalizedTitle": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar",
"fno": "09911682",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Rendering Computer Graphics",
"Legged Locomotion",
"Foot",
"Visualization",
"Vibrations",
"Loading",
"Action Observation",
"Avatar",
"Embodiment",
"Impression Of Walking",
"Vibrotactile Feedback",
"Virtual Reality"
],
"authors": [
{
"givenName": "Justine",
"surname": "Saint-Aubert",
"fullName": "Justine Saint-Aubert",
"affiliation": "Inria Rennes, Campus Universitaire de Beaulieu F-35042, Rennes Cedex, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julien",
"surname": "Manson",
"fullName": "Julien Manson",
"affiliation": "Inria Rennes, Campus Universitaire de Beaulieu F-35042, Rennes Cedex, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Isabelle",
"surname": "Bonan",
"fullName": "Isabelle Bonan",
"affiliation": "Rennes University Hospital, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yoann",
"surname": "Launey",
"fullName": "Yoann Launey",
"affiliation": "Rennes University Hospital, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "Inria Rennes, Campus Universitaire de Beaulieu F-35042, Rennes Cedex, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mélanie",
"surname": "Cogné",
"fullName": "Mélanie Cogné",
"affiliation": "Rennes University Hospital, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-8",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460066",
"title": "Rhythmic vibrations to heels and forefeet to produce virtual walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460066/12OmNBQkwZJ",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460072",
"title": "Gaitzilla: A game to study the effects of virtual embodiment in gait rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460072/12OmNBqv2nN",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a792",
"title": "Perception of Symmetry of Actual and Modulated Self-Avatar Gait Movements During Treadmill Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a792/1CJe47o4BRm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a552",
"title": "Persuasive Vibrations: Effects of Speech-Based Vibrations on Persuasion, Leadership, and Co-Presence During Verbal Communication in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a552/1MNgYjAysYU",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798263",
"title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798345",
"title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998305",
"title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998305/1hpPBuW1ahy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090634",
"title": "Rhythmic proprioceptive stimulation improves embodiment in a walking avatar when added to visual stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090634/1jIxkrgIlEY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090453",
"title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09910018",
"articleId": "1HcjhcNOxUI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09916138",
"articleId": "1HojA9hKTO8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HfCw7JuGys",
"name": "ttg555501-09911682s1-supp1-3212089.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09911682s1-supp1-3212089.mp4",
"extension": "mp4",
"size": "11.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HbasJ1nQTC",
"doi": "10.1109/TVCG.2022.3211414",
"abstract": "Ferrofluids are oil-based liquids containing magnetic particles that interact with magnetic fields without solidifying. Leveraging the exploration of new applications of these promising materials (such as in optics, medicine and engineering) requires high fidelity modeling and simulation capabilities in order to accurately explore ferrofluids <italic>in silico</italic>. While recent work addressed the macroscopic simulation of large-scale ferrofluids using smoothed-particle hydrodynamics (SPH), such simulations are computationally expensive. In their work, the Kelvin force model has been used to calculate interactions between different SPH particles. The application of this model results in a force pointing outwards with respect to the fluid surface causing significant levitation problems. This drawback limits the application of more advanced and efficient SPH frameworks such as divergence-free SPH (DFSPH) or implicit incompressible SPH (IISPH). In this contribution, we propose a current loop magnetic force model which enables the fast macroscopic simulation of ferrofluids. Our new force model results in a force term pointing inwards allowing for more stable and fast simulations of ferrofluids using DFSPH and IISPH.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Ferrofluids are oil-based liquids containing magnetic particles that interact with magnetic fields without solidifying. Leveraging the exploration of new applications of these promising materials (such as in optics, medicine and engineering) requires high fidelity modeling and simulation capabilities in order to accurately explore ferrofluids <italic>in silico</italic>. While recent work addressed the macroscopic simulation of large-scale ferrofluids using smoothed-particle hydrodynamics (SPH), such simulations are computationally expensive. In their work, the Kelvin force model has been used to calculate interactions between different SPH particles. The application of this model results in a force pointing outwards with respect to the fluid surface causing significant levitation problems. This drawback limits the application of more advanced and efficient SPH frameworks such as divergence-free SPH (DFSPH) or implicit incompressible SPH (IISPH). In this contribution, we propose a current loop magnetic force model which enables the fast macroscopic simulation of ferrofluids. Our new force model results in a force term pointing inwards allowing for more stable and fast simulations of ferrofluids using DFSPH and IISPH.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Ferrofluids are oil-based liquids containing magnetic particles that interact with magnetic fields without solidifying. Leveraging the exploration of new applications of these promising materials (such as in optics, medicine and engineering) requires high fidelity modeling and simulation capabilities in order to accurately explore ferrofluids in silico. While recent work addressed the macroscopic simulation of large-scale ferrofluids using smoothed-particle hydrodynamics (SPH), such simulations are computationally expensive. In their work, the Kelvin force model has been used to calculate interactions between different SPH particles. The application of this model results in a force pointing outwards with respect to the fluid surface causing significant levitation problems. This drawback limits the application of more advanced and efficient SPH frameworks such as divergence-free SPH (DFSPH) or implicit incompressible SPH (IISPH). In this contribution, we propose a current loop magnetic force model which enables the fast macroscopic simulation of ferrofluids. Our new force model results in a force term pointing inwards allowing for more stable and fast simulations of ferrofluids using DFSPH and IISPH.",
"title": "A Current Loop Model for the Fast Simulation of Ferrofluids",
"normalizedTitle": "A Current Loop Model for the Fast Simulation of Ferrofluids",
"fno": "09907886",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Modeling",
"Mathematical Models",
"Force",
"Numerical Models",
"Surface Tension",
"Magnetomechanical Effects",
"Magnetic Forces",
"Computational Electromagnetics",
"Divergence Free SPH DFSPH",
"Ferrofluids",
"Fluid Mechanics",
"Implicit Incompressible SPH IISPH",
"Large Scale Simulations",
"Magnetic Fluids",
"Maxwells Equations",
"Natural Phenomena",
"Navier Stokes Equations",
"Numerical Simulations",
"Smoothed Particle Hydrodynamics SPH"
],
"authors": [
{
"givenName": "Han",
"surname": "Shao",
"fullName": "Han Shao",
"affiliation": "Computational Sciences Group at the Visual Computing Center, KAUST, Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Libo",
"surname": "Huang",
"fullName": "Libo Huang",
"affiliation": "Computational Sciences Group at the Visual Computing Center, KAUST, Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dominik L.",
"surname": "Michels",
"fullName": "Dominik L. Michels",
"affiliation": "Computational Sciences Group at the Visual Computing Center, KAUST, Thuwal, Kingdom of Saudi Arabia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cse/2014/7981/0/7981a055",
"title": "Design and Development of Sypinge-Type Magnetorheological Damper",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a055/12OmNCwUmCk",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a072",
"title": "Semi-active Spiral Flow Channel Magnetorheological Damper",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a072/12OmNxw5ByD",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/10/07932108",
"title": "Pairwise Force SPH Model for Real-Time Multi-Interaction Applications",
"doi": null,
"abstractUrl": "/journal/tg/2017/10/07932108/13rRUyYjKan",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a485",
"title": "Design of a magnetic gear reducer",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a485/1ANLE9xy9uU",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csase/2022/2632/0/09759738",
"title": "Computational Technique for the Modeling on MHD Boundary Layer Flow Unsteady Stretching Sheet by B-Spline Function",
"doi": null,
"abstractUrl": "/proceedings-article/csase/2022/09759738/1CRw4HHg04U",
"parentPublication": {
"id": "proceedings/csase/2022/2632/0",
"title": "2022 International Conference on Computer Science and Software Engineering (CSASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2018/6956/0/695600a355",
"title": "A Current-to-Voltage Conversion Device Using a NdFeB Magnet, Permalloy, and Shear-Mode Piezoelectric Material",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2018/695600a355/1dUo1mHHo7m",
"parentPublication": {
"id": "proceedings/icnisc/2018/6956/0",
"title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a021",
"title": "Simulation Analysis of Electromagnetic Vibration of Rotor Compressor Motor Based on Modal Superposition Method",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a021/1hHLtwUmSvm",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icei/2020/0500/0/050000a147",
"title": "Analysis of Influence of Uneven Air Gap of Hydrogenerator on Magnetic Field Strength and Rotor Magnetic Pole Stress Change",
"doi": null,
"abstractUrl": "/proceedings-article/icei/2020/050000a147/1pcSOOtQo92",
"parentPublication": {
"id": "proceedings/icei/2020/0500/0",
"title": "2020 IEEE International Conference on Energy Internet (ICEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cseps/2021/2618/0/261800a315",
"title": "Establishment of Electromagnetic Model of Permanent Magnet Synchronous Motor for Electric Machine Emulator",
"doi": null,
"abstractUrl": "/proceedings-article/cseps/2021/261800a315/1wiQKbmaMW4",
"parentPublication": {
"id": "proceedings/cseps/2021/2618/0",
"title": "2021 International Conference on Control Science and Electric Power Systems (CSEPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2021/9768/0/976800a050",
"title": "Magnetic Circuit Simulation and Analysis of Shear-Valve Mode MRF Damper",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2021/976800a050/1zuuVD3H7Qk",
"parentPublication": {
"id": "proceedings/icmeas/2021/9768/0",
"title": "2021 7th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09905944",
"articleId": "1H3ZWyzEaha",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09909994",
"articleId": "1Hcj8wIB6s8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HmgbCmAJW0",
"name": "ttg555501-09907886s1-supp1-3211414.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09907886s1-supp1-3211414.mp4",
"extension": "mp4",
"size": "44.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Hcj8wIB6s8",
"doi": "10.1109/TVCG.2022.3204608",
"abstract": "View synthesis methods using implicit continuous shape representations learned from a set of images, such as the Neural Radiance Field (NeRF) method, have gained increasing attention due to their high quality imagery and scalability to high resolution. However, the heavy computation required by its volumetric approach prevents NeRF from being useful in practice; minutes are taken to render a single image of a few megapixels. Now, an image of a scene can be rendered in a level-of-detail manner, so we posit that a complicated region of the scene should be represented by a large neural network while a small neural network is capable of encoding a simple region, enabling a balance between efficiency and quality. <italic>Recursive-NeRF</italic> is our embodiment of this idea, providing an efficient and adaptive rendering and training approach for NeRF. The core of Recursive-NeRF learns uncertainties for query coordinates, representing the quality of the predicted color and volumetric intensity at each level. Only query coordinates with high uncertainties are forwarded to the next level to a bigger neural network with a more powerful representational capability. The final rendered image is a composition of results from neural networks of all levels. Our evaluation on public datasets and a large-scale scene dataset we collected shows that Recursive-NeRF is more efficient than NeRF while providing state-of-the-art quality. The code will be available at https://github.com/Gword/Recursive-NeRF.",
"abstracts": [
{
"abstractType": "Regular",
"content": "View synthesis methods using implicit continuous shape representations learned from a set of images, such as the Neural Radiance Field (NeRF) method, have gained increasing attention due to their high quality imagery and scalability to high resolution. However, the heavy computation required by its volumetric approach prevents NeRF from being useful in practice; minutes are taken to render a single image of a few megapixels. Now, an image of a scene can be rendered in a level-of-detail manner, so we posit that a complicated region of the scene should be represented by a large neural network while a small neural network is capable of encoding a simple region, enabling a balance between efficiency and quality. <italic>Recursive-NeRF</italic> is our embodiment of this idea, providing an efficient and adaptive rendering and training approach for NeRF. The core of Recursive-NeRF learns uncertainties for query coordinates, representing the quality of the predicted color and volumetric intensity at each level. Only query coordinates with high uncertainties are forwarded to the next level to a bigger neural network with a more powerful representational capability. The final rendered image is a composition of results from neural networks of all levels. Our evaluation on public datasets and a large-scale scene dataset we collected shows that Recursive-NeRF is more efficient than NeRF while providing state-of-the-art quality. The code will be available at https://github.com/Gword/Recursive-NeRF.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "View synthesis methods using implicit continuous shape representations learned from a set of images, such as the Neural Radiance Field (NeRF) method, have gained increasing attention due to their high quality imagery and scalability to high resolution. However, the heavy computation required by its volumetric approach prevents NeRF from being useful in practice; minutes are taken to render a single image of a few megapixels. Now, an image of a scene can be rendered in a level-of-detail manner, so we posit that a complicated region of the scene should be represented by a large neural network while a small neural network is capable of encoding a simple region, enabling a balance between efficiency and quality. Recursive-NeRF is our embodiment of this idea, providing an efficient and adaptive rendering and training approach for NeRF. The core of Recursive-NeRF learns uncertainties for query coordinates, representing the quality of the predicted color and volumetric intensity at each level. Only query coordinates with high uncertainties are forwarded to the next level to a bigger neural network with a more powerful representational capability. The final rendered image is a composition of results from neural networks of all levels. Our evaluation on public datasets and a large-scale scene dataset we collected shows that Recursive-NeRF is more efficient than NeRF while providing state-of-the-art quality. The code will be available at https://github.com/Gword/Recursive-NeRF.",
"title": "Recursive-NeRF: An Efficient and Dynamically Growing NeRF",
"normalizedTitle": "Recursive-NeRF: An Efficient and Dynamically Growing NeRF",
"fno": "09909994",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Neural Networks",
"Complexity Theory",
"Uncertainty",
"Training",
"Three Dimensional Displays",
"Image Color Analysis",
"3 D Deep Learning",
"Image Based Rendering",
"Scene Representation",
"View Synthesis",
"Volume Rendering"
],
"authors": [
{
"givenName": "Guo-Wei",
"surname": "Yang",
"fullName": "Guo-Wei Yang",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wen-Yang",
"surname": "Zhou",
"fullName": "Wen-Yang Zhou",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao-Yang",
"surname": "Peng",
"fullName": "Hao-Yang Peng",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dun",
"surname": "Liang",
"fullName": "Dun Liang",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tai-Jiang",
"surname": "Mu",
"fullName": "Tai-Jiang Mu",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shi-Min",
"surname": "Hu",
"fullName": "Shi-Min Hu",
"affiliation": "BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200f865",
"title": "Putting NeRF on a Diet: Semantically Consistent Few-Shot View Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f865/1BmLttoRg3e",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8321",
"title": "StylizedNeRF: Consistent 3D Scene Stylization as Stylized NeRF via 2D-3D Mutual Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8321/1H0L3Z762gU",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8332",
"title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f460",
"title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i238",
"title": "Block-NeRF: Scalable Large Scene Neural View Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i238/1H1hVQ0jgBy",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2872",
"title": "Depth-supervised NeRF: Fewer Views and Faster Training for Free",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2872/1H1ieODToYw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5170",
"title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f428",
"title": "Point-NeRF: Point-based Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h206",
"title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0313",
"title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09907886",
"articleId": "1HbasJ1nQTC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09910018",
"articleId": "1HcjhcNOxUI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HeiSpzzoha",
"name": "ttg555501-09909994s1-supp1-3204608.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09909994s1-supp1-3204608.mp4",
"extension": "mp4",
"size": "195 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HcjhcNOxUI",
"doi": "10.1109/TVCG.2022.3211781",
"abstract": "Locating neck-like features, or locally narrow parts, of a surface is crucial in various applications such as segmentation, shape analysis, path planning, and robotics. Topological methods are often utilized to find the set of shortest loops around handles and tunnels. However, there are abundant neck-like features on genus-0 shapes without any handles. While 3D geometry-aware topological approaches exist to find neck loops, their construction can be cumbersome and may even lead to geometrically wide loops. Thus we propose a “topology-aware geometric approach” to compute the tightest loops around neck features on surfaces, including genus-0 surfaces. Our algorithm starts with a volumetric representation of an input surface and then calculates the distance function of mesh points to the boundary surface as a Morse function. All neck features induce critical points of this Morse function where the Hessian matrix has precisely one positive eigenvalue, i.e., type-2 saddles. As we focus on geometric neck features, we bypass a topological construction such as the Morse-Smale complex or a lower-star filtration. Instead, we directly create a cutting plane through each neck feature. Each resulting loop can then be tightened to form a closed geodesic representation of the neck feature. Moreover, we offer criteria to measure the significance of a neck feature through the evolution of critical points when smoothing the distance function. Furthermore, we speed up the detection process through mesh simplification without compromising the quality of the output loops.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Locating neck-like features, or locally narrow parts, of a surface is crucial in various applications such as segmentation, shape analysis, path planning, and robotics. Topological methods are often utilized to find the set of shortest loops around handles and tunnels. However, there are abundant neck-like features on genus-0 shapes without any handles. While 3D geometry-aware topological approaches exist to find neck loops, their construction can be cumbersome and may even lead to geometrically wide loops. Thus we propose a “topology-aware geometric approach” to compute the tightest loops around neck features on surfaces, including genus-0 surfaces. Our algorithm starts with a volumetric representation of an input surface and then calculates the distance function of mesh points to the boundary surface as a Morse function. All neck features induce critical points of this Morse function where the Hessian matrix has precisely one positive eigenvalue, i.e., type-2 saddles. As we focus on geometric neck features, we bypass a topological construction such as the Morse-Smale complex or a lower-star filtration. Instead, we directly create a cutting plane through each neck feature. Each resulting loop can then be tightened to form a closed geodesic representation of the neck feature. Moreover, we offer criteria to measure the significance of a neck feature through the evolution of critical points when smoothing the distance function. Furthermore, we speed up the detection process through mesh simplification without compromising the quality of the output loops.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Locating neck-like features, or locally narrow parts, of a surface is crucial in various applications such as segmentation, shape analysis, path planning, and robotics. Topological methods are often utilized to find the set of shortest loops around handles and tunnels. However, there are abundant neck-like features on genus-0 shapes without any handles. While 3D geometry-aware topological approaches exist to find neck loops, their construction can be cumbersome and may even lead to geometrically wide loops. Thus we propose a “topology-aware geometric approach” to compute the tightest loops around neck features on surfaces, including genus-0 surfaces. Our algorithm starts with a volumetric representation of an input surface and then calculates the distance function of mesh points to the boundary surface as a Morse function. All neck features induce critical points of this Morse function where the Hessian matrix has precisely one positive eigenvalue, i.e., type-2 saddles. As we focus on geometric neck features, we bypass a topological construction such as the Morse-Smale complex or a lower-star filtration. Instead, we directly create a cutting plane through each neck feature. Each resulting loop can then be tightened to form a closed geodesic representation of the neck feature. Moreover, we offer criteria to measure the significance of a neck feature through the evolution of critical points when smoothing the distance function. Furthermore, we speed up the detection process through mesh simplification without compromising the quality of the output loops.",
"title": "Fast Computation of Neck-like Features",
"normalizedTitle": "Fast Computation of Neck-like Features",
"fno": "09910018",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Neck",
"Shape",
"Laplace Equations",
"Filtration",
"Three Dimensional Displays",
"Smoothing Methods",
"Computational Modeling",
"Computer Graphics",
"Computational Geometry",
"And Object Modeling",
"Curve",
"Surface",
"Object Representations"
],
"authors": [
{
"givenName": "Hayam",
"surname": "Abdelrahman",
"fullName": "Hayam Abdelrahman",
"affiliation": "Department of Computer Science and Engineering, Michigan State University, East Lansing, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiying",
"surname": "Tong",
"fullName": "Yiying Tong",
"affiliation": "Department of Computer Science and Engineering, Michigan State University, East Lansing, MI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-10",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/itme/2016/3906/0/3906a333",
"title": "The Features of Lymph Node Metastasis of Differentiated Thyroid Carcinoma and the Choice of Lateral Neck Lymph Nodes Dissection",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a333/12OmNBO3K3F",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2014/2874/0/2874a049",
"title": "2D Vector Field Simplification Based on Robustness",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2014/2874a049/12OmNwpoFGP",
"parentPublication": {
"id": "proceedings/pacificvis/2014/2874/0",
"title": "2014 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c957",
"title": "Robust 3D Features for Matching between Distorted Range Scans Captured by Moving Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c957/12OmNy5hRg0",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081298",
"title": "Choking Loops on Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081298/13rRUNvyakN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/08/07117431",
"title": "Robustness-Based Simplification of 2D Steady and Unsteady Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2015/08/07117431/13rRUwhHcJl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/06/07018984",
"title": "Fast Edge-Aware Processing via First Order Proximal Approximation",
"doi": null,
"abstractUrl": "/journal/tg/2015/06/07018984/13rRUxAASTe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/01/08509134",
"title": "Efficient Inter-Geodesic Distance Computation and Fast Classical Scaling",
"doi": null,
"abstractUrl": "/journal/tp/2020/01/08509134/14Fq0W8dzaM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08540419",
"title": "Spectral Mesh Segmentation via <inline-formula><tex-math notation=\"LaTeX\">Z_$\\ell _0$_Z</tex-math></inline-formula> Gradient Minimization",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08540419/17D45XoXP4p",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyberc/2019/2542/0/254200a245",
"title": "A Method for Extracting Topological Features of Internet Testbeds Oriented to Equivalent Deduction",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2019/254200a245/1gjRZ4ev0wo",
"parentPublication": {
"id": "proceedings/cyberc/2019/2542/0",
"title": "2019 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400b691",
"title": "Fast Similarity Computation for t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400b691/1uGXpqMZXag",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09909994",
"articleId": "1Hcj8wIB6s8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09911682",
"articleId": "1HeiWQWKlTG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Hmgb4Bb3Ec",
"name": "ttg555501-09910018s1-tvcg-3211781-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09910018s1-tvcg-3211781-mm.zip",
"extension": "zip",
"size": "13.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H3ZWyzEaha",
"doi": "10.1109/TVCG.2022.3210763",
"abstract": "Idealized probability distributions, such as normal or other curves, lie at the root of confirmatory statistical tests. But how well do people understand these idealized curves? In practical terms, does the human visual system allow us to match sample data distributions with hypothesized population distributions from which those samples might have been drawn? And how do different visualization techniques impact this capability? This paper shares the results of a crowdsourced experiment that tested the ability of respondents to fit normal curves to four different data distribution visualizations: bar histograms, dotplot histograms, strip plots, and boxplots. We find that the crowd can estimate the center (mean) of a distribution with some success and little bias. We also find that people generally overestimate the standard deviation—which we dub the “umbrella effect” because people tend to want to cover the whole distribution using the curve, as if sheltering it from the heavens above—and that strip plots yield the best accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Idealized probability distributions, such as normal or other curves, lie at the root of confirmatory statistical tests. But how well do people understand these idealized curves? In practical terms, does the human visual system allow us to match sample data distributions with hypothesized population distributions from which those samples might have been drawn? And how do different visualization techniques impact this capability? This paper shares the results of a crowdsourced experiment that tested the ability of respondents to fit normal curves to four different data distribution visualizations: bar histograms, dotplot histograms, strip plots, and boxplots. We find that the crowd can estimate the center (mean) of a distribution with some success and little bias. We also find that people generally overestimate the standard deviation—which we dub the “umbrella effect” because people tend to want to cover the whole distribution using the curve, as if sheltering it from the heavens above—and that strip plots yield the best accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Idealized probability distributions, such as normal or other curves, lie at the root of confirmatory statistical tests. But how well do people understand these idealized curves? In practical terms, does the human visual system allow us to match sample data distributions with hypothesized population distributions from which those samples might have been drawn? And how do different visualization techniques impact this capability? This paper shares the results of a crowdsourced experiment that tested the ability of respondents to fit normal curves to four different data distribution visualizations: bar histograms, dotplot histograms, strip plots, and boxplots. We find that the crowd can estimate the center (mean) of a distribution with some success and little bias. We also find that people generally overestimate the standard deviation—which we dub the “umbrella effect” because people tend to want to cover the whole distribution using the curve, as if sheltering it from the heavens above—and that strip plots yield the best accuracy.",
"title": "Fitting Bell Curves to Data Distributions Using Visualization",
"normalizedTitle": "Fitting Bell Curves to Data Distributions Using Visualization",
"fno": "09905944",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Task Analysis",
"Shape",
"Histograms",
"Bars",
"Strips",
"Crowdsourcing",
"Fitting Distributions",
"Graphical Inference",
"Statistics By Eye",
"Visual Statistics"
],
"authors": [
{
"givenName": "Eric",
"surname": "Newburger",
"fullName": "Eric Newburger",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Correll",
"fullName": "Michael Correll",
"affiliation": "Tableau Research, Seattle, WA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2016/1437/0/1437b066",
"title": "Fast Dynamic Programming for Elastic Registration of Curves",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b066/12OmNCw3z4V",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814978",
"title": "Fitting Multiple Curves to Point Clouds with Complicated Topological Structures",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814978/12OmNxwncAi",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2017/0852/0/0852a099",
"title": "Stem & Leaf Plots Extended for Text Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2017/0852a099/12OmNy5zspa",
"parentPublication": {
"id": "proceedings/cgiv/2017/0852/0",
"title": "2017 14th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223220",
"title": "Parametrizing and fitting bounded algebraic curves and surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223220/12OmNyLA5xY",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118145",
"title": "Fitting smooth curves",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118145/12OmNynsbu5",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017644",
"title": "Nonlinear Dot Plots",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017644/13rRUNvgz4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875964",
"title": "Curve Boxplot: Generalization of Boxplot for Ensembles of Curves",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875964/13rRUwInvf7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07836349",
"title": "DSPCP: A Data Scalable Approach for Identifying Relationships in Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07836349/13rRUxZzAhK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904443",
"title": "Relaxed Dot Plots: Faithful Visualization of Samples and Their Distribution",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904443/1H1gjXXGG2s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552205",
"title": "Visualization Equilibrium",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552205/1xic4zmtlra",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09905473",
"articleId": "1H2lfN8dsT6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09907886",
"articleId": "1HbasJ1nQTC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1H5EWyrX6MM",
"name": "ttg555501-09905944s1-supp1-3210763.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09905944s1-supp1-3210763.pdf",
"extension": "pdf",
"size": "1.14 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H2ljAppvkA",
"doi": "10.1109/TVCG.2022.3209631",
"abstract": "We present an approach for interactively visualizing data using color-changing inks without the need for electronic displays or computers. Color-changing inks are a family of physical inks that change their color characteristics in response to an external stimulus such as heat, UV light, water, and pressure. Visualizations created using color-changing inks can embed interactivity in printed material without external computational media. In this paper, we survey current color-changing ink technology and then use these findings to derive a framework for how it can be used to construct interactive data representations. We also enumerate the interaction techniques possible using this technology. We then show some examples of how to use color-changing ink to create interactive visualizations on paper. While obviously limited in scope to situations where no power or computing is present, or as a complement to digital displays, our findings can be employed for paper, data physicalization, and embedded visualizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach for interactively visualizing data using color-changing inks without the need for electronic displays or computers. Color-changing inks are a family of physical inks that change their color characteristics in response to an external stimulus such as heat, UV light, water, and pressure. Visualizations created using color-changing inks can embed interactivity in printed material without external computational media. In this paper, we survey current color-changing ink technology and then use these findings to derive a framework for how it can be used to construct interactive data representations. We also enumerate the interaction techniques possible using this technology. We then show some examples of how to use color-changing ink to create interactive visualizations on paper. While obviously limited in scope to situations where no power or computing is present, or as a complement to digital displays, our findings can be employed for paper, data physicalization, and embedded visualizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach for interactively visualizing data using color-changing inks without the need for electronic displays or computers. Color-changing inks are a family of physical inks that change their color characteristics in response to an external stimulus such as heat, UV light, water, and pressure. Visualizations created using color-changing inks can embed interactivity in printed material without external computational media. In this paper, we survey current color-changing ink technology and then use these findings to derive a framework for how it can be used to construct interactive data representations. We also enumerate the interaction techniques possible using this technology. We then show some examples of how to use color-changing ink to create interactive visualizations on paper. While obviously limited in scope to situations where no power or computing is present, or as a complement to digital displays, our findings can be employed for paper, data physicalization, and embedded visualizations.",
"title": "Sensemaking Sans Power: Interactive Data Visualization Using Color-Changing Ink",
"normalizedTitle": "Sensemaking Sans Power: Interactive Data Visualization Using Color-Changing Ink",
"fno": "09904859",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Ink",
"Image Color Analysis",
"Three Dimensional Displays",
"Visualization",
"Media",
"Paints",
"Physical Computing",
"Color Changing Inks",
"Design Space",
"Data Physicalization",
"Data Visualization"
],
"authors": [
{
"givenName": "Biswaksen",
"surname": "Patnaik",
"fullName": "Biswaksen Patnaik",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huaishu",
"surname": "Peng",
"fullName": "Huaishu Peng",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/euc/2011/4552/0/4552a221",
"title": "AmbiKraf: A Nonemissive Fabric Display for Fast Changing Textile Animation",
"doi": null,
"abstractUrl": "/proceedings-article/euc/2011/4552a221/12OmNAo45Kh",
"parentPublication": {
"id": "proceedings/euc/2011/4552/0",
"title": "Embedded and Ubiquitous Computing, IEEE/IFIP International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2018/3346/0/3346a393",
"title": "Automated Forgery Detection in Multispectral Document Images Using Fuzzy Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/das/2018/3346a393/12OmNBa2iFf",
"parentPublication": {
"id": "proceedings/das/2018/3346/0",
"title": "2018 13th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2013/4999/0/06628744",
"title": "Hyperspectral Imaging for Ink Mismatch Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2013/06628744/12OmNCd2ryG",
"parentPublication": {
"id": "proceedings/icdar/2013/4999/0",
"title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206554",
"title": "Interval HSV: Extracting ink annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206554/12OmNxb5huc",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831803",
"title": "Multiple layer screening for reducing moire patterning and ink bleeding",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831803/12OmNxiKs6p",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2017/3586/1/3586b229",
"title": "Towards Automated Ink Mismatch Detection in Hyperspectral Document Images",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586b229/12OmNzkMlS9",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0235",
"title": "Image-Based Color Ink Diffusion Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0235/13rRUxASuSC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a193",
"title": "Two-Stage Color ink Painting Style Transfer via Convolution Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a193/17D45XvMcbn",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdiime/2022/9009/0/900900a022",
"title": "Research on Ink Speed Recognition Method of Hyperspectral Imaging Ink Pad Based on Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdiime/2022/900900a022/1Iz57amKlnG",
"parentPublication": {
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdarw/2019/5054/8/505408a025",
"title": "Comparison of Ink Classification Capabilities of Classic Hyperspectral Similarity Features",
"doi": null,
"abstractUrl": "/proceedings-article/icdarw/2019/505408a025/1eLyft5LBqE",
"parentPublication": {
"id": "icdarw/2019/5054/8",
"title": "2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09905473",
"articleId": "1H2lfN8dsT6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09905944",
"articleId": "1H3ZWyzEaha",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1H3ZZ9poxJC",
"name": "ttg555501-09904859s1-supp1-3209631.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904859s1-supp1-3209631.mp4",
"extension": "mp4",
"size": "187 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H2lfN8dsT6",
"doi": "10.1109/TVCG.2022.3209328",
"abstract": "Electroactive polymers are frequently used in engineering applications due to their ability to change their shape and properties under the influence of an electric field. This process also works vice versa, such that mechanical deformation of the material induces an electric field in the EAP device. This specific behavior makes such materials highly attractive for the construction of actuators and sensors in various application areas. The electromechanical behaviour of electroactive polymers can be described by a third-order coupling tensor, which represents the sensitivity of mechanical stresses concerning the electric field, i.e., it establishes a relation between a second-order and a first-order tensor field. Due to this coupling tensor's complexity and the lack of meaningful visualization methods for third-order tensors in general, an interpretation of the tensor is rather difficult. Thus, the central engineering research question that this contribution deals with is a deeper understanding of electromechanical coupling by analyzing the third-order coupling tensor with the help of specific visualization methods. Starting with a deviatoric decomposition of the tensor, the multipoles of each deviator are visualized, which allows a first insight into this highly complex third-order tensor. In the present contribution, four examples, including electromechanical coupling, are simulated within a finite element framework and subsequently analyzed using the tensor visualization method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Electroactive polymers are frequently used in engineering applications due to their ability to change their shape and properties under the influence of an electric field. This process also works vice versa, such that mechanical deformation of the material induces an electric field in the EAP device. This specific behavior makes such materials highly attractive for the construction of actuators and sensors in various application areas. The electromechanical behaviour of electroactive polymers can be described by a third-order coupling tensor, which represents the sensitivity of mechanical stresses concerning the electric field, i.e., it establishes a relation between a second-order and a first-order tensor field. Due to this coupling tensor's complexity and the lack of meaningful visualization methods for third-order tensors in general, an interpretation of the tensor is rather difficult. Thus, the central engineering research question that this contribution deals with is a deeper understanding of electromechanical coupling by analyzing the third-order coupling tensor with the help of specific visualization methods. Starting with a deviatoric decomposition of the tensor, the multipoles of each deviator are visualized, which allows a first insight into this highly complex third-order tensor. In the present contribution, four examples, including electromechanical coupling, are simulated within a finite element framework and subsequently analyzed using the tensor visualization method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Electroactive polymers are frequently used in engineering applications due to their ability to change their shape and properties under the influence of an electric field. This process also works vice versa, such that mechanical deformation of the material induces an electric field in the EAP device. This specific behavior makes such materials highly attractive for the construction of actuators and sensors in various application areas. The electromechanical behaviour of electroactive polymers can be described by a third-order coupling tensor, which represents the sensitivity of mechanical stresses concerning the electric field, i.e., it establishes a relation between a second-order and a first-order tensor field. Due to this coupling tensor's complexity and the lack of meaningful visualization methods for third-order tensors in general, an interpretation of the tensor is rather difficult. Thus, the central engineering research question that this contribution deals with is a deeper understanding of electromechanical coupling by analyzing the third-order coupling tensor with the help of specific visualization methods. Starting with a deviatoric decomposition of the tensor, the multipoles of each deviator are visualized, which allows a first insight into this highly complex third-order tensor. In the present contribution, four examples, including electromechanical coupling, are simulated within a finite element framework and subsequently analyzed using the tensor visualization method.",
"title": "Electromechanical Coupling in Electroactive Polymers – a Visual Analysis of a Third-Order Tensor Field",
"normalizedTitle": "Electromechanical Coupling in Electroactive Polymers – a Visual Analysis of a Third-Order Tensor Field",
"fno": "09905473",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Tensors",
"Couplings",
"Visualization",
"Strain",
"Behavioral Sciences",
"Shape",
"Plastics",
"Deviatoric Decomposition",
"Electro Active Polymer",
"Tensor Visualization",
"Third Order Tensor"
],
"authors": [
{
"givenName": "Chiara",
"surname": "Hergl",
"fullName": "Chiara Hergl",
"affiliation": "Institute of Computer Science, Leipzig University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Carina",
"surname": "Witt",
"fullName": "Carina Witt",
"affiliation": "Institute of Mechanics, TU Dortmund, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baldwin",
"surname": "Nsonga",
"fullName": "Baldwin Nsonga",
"affiliation": "Institute of Computer Science, Leipzig University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Menzel",
"fullName": "Andreas Menzel",
"affiliation": "Institute of Mechanics, TU Dortmund, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerik",
"surname": "Scheuermann",
"fullName": "Gerik Scheuermann",
"affiliation": "Institute of Computer Science, Leipzig University, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2012/4789/0/4789a033",
"title": "Anisotropic Mechanical-Hydraulic Coupling for Deep Buried Tunnel in Soft Rock with Rich Water",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2012/4789a033/12OmNxdVgOc",
"parentPublication": {
"id": "proceedings/iccis/2012/4789/0",
"title": "2012 Fourth International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2015/9785/0/07429491",
"title": "Feature-based tensor field visualization for fiber reinforced polymers",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2015/07429491/12OmNxuXcAB",
"parentPublication": {
"id": "proceedings/scivis/2015/9785/0",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09787357",
"title": "Blockchain-based Transparency Framework for Privacy Preserving Third-party Services",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09787357/1DSuouD5lh6",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09854153",
"title": "When Behavior Analysis Meets Social Network Alignment",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09854153/1FJ0RquRClq",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn-s/2022/0260/0/026000a025",
"title": "Reliability of Google’s Tensor Processing Units for Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsn-s/2022/026000a025/1FiytsaT0m4",
"parentPublication": {
"id": "proceedings/dsn-s/2022/0260/0",
"title": "2022 52nd Annual IEEE/IFIP International Conference on Dependable Systems and Networks - Supplemental Volume (DSN-S)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2023/01/09931992",
"title": "Dissecting Tensor Cores via Microbenchmarks: Latency, Throughput and Numeric Behaviors",
"doi": null,
"abstractUrl": "/journal/td/2023/01/09931992/1HQ8fynB0di",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2022/5417/0/541700a444",
"title": "Mining Composite Spatio-Temporal Lifestyle Patterns from Geotagged Social Data",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2022/541700a444/1HcmQIdzjLq",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2022/5417/0",
"title": "2022 IEEE International Conferences on Internet of Things (iThings) and IEEE Green Computing & Communications (GreenCom) and IEEE Cyber, Physical & Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2022/5099/0/509900a241",
"title": "Kernel-based Hybrid Interpretable Transformer for High-frequency Stock Movement Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2022/509900a241/1KpCkb26rJu",
"parentPublication": {
"id": "proceedings/icdm/2022/5099/0",
"title": "2022 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09904441",
"articleId": "1H0GjYseFs4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904859",
"articleId": "1H2ljAppvkA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1H3ZXlW9cIM",
"name": "ttg555501-09905473s1-supp1-3209328.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09905473s1-supp1-3209328.pdf",
"extension": "pdf",
"size": "2.03 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H0GdxnVnws",
"doi": "10.1109/TVCG.2022.3209963",
"abstract": "We propose neural global illumination, a novel method for fast rendering full global illumination in static scenes with dynamic viewpoint and area lighting. The key idea of our method is to utilize a deep rendering network to model the complex mapping from each shading point to global illumination. To efficiently learn the mapping, we propose a neural-network-friendly input representation including attributes of each shading point, viewpoint information, and a combinational lighting representation that enables high-quality fitting with a compact neural network. To synthesize high-frequency global illumination effects, we transform the low-dimension input to higher-dimension space by positional encoding and model the rendering network as a deep fully-connected network. Besides, we feed a screen-space neural buffer to our rendering network to share global information between objects in the screen-space to each shading point. We have demonstrated our neural global illumination method in rendering a wide variety of scenes exhibiting complex and all-frequency global illumination effects such as multiple-bounce glossy interreflection, color bleeding, and caustics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose neural global illumination, a novel method for fast rendering full global illumination in static scenes with dynamic viewpoint and area lighting. The key idea of our method is to utilize a deep rendering network to model the complex mapping from each shading point to global illumination. To efficiently learn the mapping, we propose a neural-network-friendly input representation including attributes of each shading point, viewpoint information, and a combinational lighting representation that enables high-quality fitting with a compact neural network. To synthesize high-frequency global illumination effects, we transform the low-dimension input to higher-dimension space by positional encoding and model the rendering network as a deep fully-connected network. Besides, we feed a screen-space neural buffer to our rendering network to share global information between objects in the screen-space to each shading point. We have demonstrated our neural global illumination method in rendering a wide variety of scenes exhibiting complex and all-frequency global illumination effects such as multiple-bounce glossy interreflection, color bleeding, and caustics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose neural global illumination, a novel method for fast rendering full global illumination in static scenes with dynamic viewpoint and area lighting. The key idea of our method is to utilize a deep rendering network to model the complex mapping from each shading point to global illumination. To efficiently learn the mapping, we propose a neural-network-friendly input representation including attributes of each shading point, viewpoint information, and a combinational lighting representation that enables high-quality fitting with a compact neural network. To synthesize high-frequency global illumination effects, we transform the low-dimension input to higher-dimension space by positional encoding and model the rendering network as a deep fully-connected network. Besides, we feed a screen-space neural buffer to our rendering network to share global information between objects in the screen-space to each shading point. We have demonstrated our neural global illumination method in rendering a wide variety of scenes exhibiting complex and all-frequency global illumination effects such as multiple-bounce glossy interreflection, color bleeding, and caustics.",
"title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"normalizedTitle": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"fno": "09904431",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Rendering Computer Graphics",
"Light Sources",
"Reflection",
"Real Time Systems",
"Probes",
"Neural Networks",
"Global Illumination",
"Deep Learning"
],
"authors": [
{
"givenName": "Duan",
"surname": "Gao",
"fullName": "Duan Gao",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haoyuan",
"surname": "Mu",
"fullName": "Haoyuan Mu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Xu",
"fullName": "Kun Xu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2016/2303/0/2303a057",
"title": "Interactive Screenspace Stream-Compaction Fragment Rendering of Direct Illumination from Area Lights",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a057/12OmNCdk2W8",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a056",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a913",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbmcv/1995/7021/0/00514684",
"title": "Reflectance analysis under solar illumination",
"doi": null,
"abstractUrl": "/proceedings-article/pbmcv/1995/00514684/12OmNxbW4O4",
"parentPublication": {
"id": "proceedings/pbmcv/1995/7021/0",
"title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a093",
"title": "Fast Point Based Global Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a093/12OmNzdoN7B",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/06/mcg2010060029",
"title": "Advanced Volume Illumination with Unconstrained Light Source Positioning",
"doi": null,
"abstractUrl": "/magazine/cg/2010/06/mcg2010060029/13rRUNvPLcm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1576",
"title": "Lattice-Based Volumetric Global Illumination",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1576/13rRUxBa55V",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8622",
"title": "Modeling Indirect Illumination for Inverse Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8622/1H1jdnZPS0g",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/01/08951772",
"title": "Origins of Global Illumination",
"doi": null,
"abstractUrl": "/magazine/cg/2020/01/08951772/1goL8Hzhdcs",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a189",
"title": "Deep Consistent Illumination in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09895311",
"articleId": "1GNprsVfaFi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904441",
"articleId": "1H0GjYseFs4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1H2kYZqli1y",
"name": "ttg555501-09904431s1-supp1-3209963.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904431s1-supp1-3209963.pdf",
"extension": "pdf",
"size": "6.7 MB",
"__typename": "WebExtraType"
},
{
"id": "1H2kYNxb1e0",
"name": "ttg555501-09904431s1-supp2-3209963.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904431s1-supp2-3209963.mp4",
"extension": "mp4",
"size": "44.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H0GjYseFs4",
"doi": "10.1109/TVCG.2022.3209414",
"abstract": "In the era of quantitative investment, factor-based investing models are widely adopted in the construction of stock portfolios. These models explain the performance of individual stocks by a set of financial factors, e.g., market beta and company size. In industry, open investment platforms allow the online building of factor-based models, yet set a high bar on the engineering expertise of end-users. State-of-the-art visualization systems integrate the whole factor investing pipeline, but do not directly address domain users' core requests on ranking factors and stocks for portfolio construction. The current model lacks explainability, which downgrades its credibility with stock investors. To fill the gap in modeling, ranking, and visualizing stock time series for factor investment, we designed and implemented a visual analytics system, namely RankFIRST. The system offers built-in support for an established factor collection and a cross-sectional regression model viable for human interpretation. A hierarchical slope graph design is introduced according to the desired characteristics of good factors for stock investment. A novel firework chart is also invented extending the well-known candlestick chart for stock time series. We evaluated the system on the full-scale Chinese stock market data in the recent 30 years. Case studies and controlled user evaluation demonstrate the superiority of our system on factor investing, in comparison to both passive investing on stock indices and existing stock market visual analytics tools.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the era of quantitative investment, factor-based investing models are widely adopted in the construction of stock portfolios. These models explain the performance of individual stocks by a set of financial factors, e.g., market beta and company size. In industry, open investment platforms allow the online building of factor-based models, yet set a high bar on the engineering expertise of end-users. State-of-the-art visualization systems integrate the whole factor investing pipeline, but do not directly address domain users' core requests on ranking factors and stocks for portfolio construction. The current model lacks explainability, which downgrades its credibility with stock investors. To fill the gap in modeling, ranking, and visualizing stock time series for factor investment, we designed and implemented a visual analytics system, namely RankFIRST. The system offers built-in support for an established factor collection and a cross-sectional regression model viable for human interpretation. A hierarchical slope graph design is introduced according to the desired characteristics of good factors for stock investment. A novel firework chart is also invented extending the well-known candlestick chart for stock time series. We evaluated the system on the full-scale Chinese stock market data in the recent 30 years. Case studies and controlled user evaluation demonstrate the superiority of our system on factor investing, in comparison to both passive investing on stock indices and existing stock market visual analytics tools.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the era of quantitative investment, factor-based investing models are widely adopted in the construction of stock portfolios. These models explain the performance of individual stocks by a set of financial factors, e.g., market beta and company size. In industry, open investment platforms allow the online building of factor-based models, yet set a high bar on the engineering expertise of end-users. State-of-the-art visualization systems integrate the whole factor investing pipeline, but do not directly address domain users' core requests on ranking factors and stocks for portfolio construction. The current model lacks explainability, which downgrades its credibility with stock investors. To fill the gap in modeling, ranking, and visualizing stock time series for factor investment, we designed and implemented a visual analytics system, namely RankFIRST. The system offers built-in support for an established factor collection and a cross-sectional regression model viable for human interpretation. A hierarchical slope graph design is introduced according to the desired characteristics of good factors for stock investment. A novel firework chart is also invented extending the well-known candlestick chart for stock time series. We evaluated the system on the full-scale Chinese stock market data in the recent 30 years. Case studies and controlled user evaluation demonstrate the superiority of our system on factor investing, in comparison to both passive investing on stock indices and existing stock market visual analytics tools.",
"title": "RankFIRST: Visual Analysis for Factor Investment By Ranking Stock Timeseries",
"normalizedTitle": "RankFIRST: Visual Analysis for Factor Investment By Ranking Stock Timeseries",
"fno": "09904441",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Investment",
"Portfolios",
"Time Series Analysis",
"Data Visualization",
"Stock Markets",
"Visual Analytics",
"Analytical Models",
"Stock Market",
"Factor Investing",
"Visual Analysis"
],
"authors": [
{
"givenName": "Huijie",
"surname": "Guo",
"fullName": "Huijie Guo",
"affiliation": "SKLSDE and Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Meijun",
"surname": "Liu",
"fullName": "Meijun Liu",
"affiliation": "SKLSDE and Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bowen",
"surname": "Yang",
"fullName": "Bowen Yang",
"affiliation": "SKLSDE and Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ye",
"surname": "Sun",
"fullName": "Ye Sun",
"affiliation": "SKLSDE and Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Shi",
"fullName": "Lei Shi",
"affiliation": "SKLSDE and Beijing Advanced Innovation Center for Big Data and Brain Computing, School of Computer Science and Engineering, Beihang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-10",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/imis/2014/4331/0/4331a575",
"title": "Analysts' Forecasts and Institutional Investors' Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2014/4331a575/12OmNwDSdgj",
"parentPublication": {
"id": "proceedings/imis/2014/4331/0",
"title": "2014 Eighth International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2014/6239/0/6239a482",
"title": "Analysis on Stock Market Volatility with Collective Human Behaviors in Online Message Board",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2014/6239a482/12OmNxVlTDL",
"parentPublication": {
"id": "proceedings/cit/2014/6239/0",
"title": "2014 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2012/1365/0/06274787",
"title": "Individual Pension Account Fund Investing Strategy",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2012/06274787/12OmNxZBSyI",
"parentPublication": {
"id": "proceedings/cso/2012/1365/0",
"title": "2012 Fifth International Joint Conference on Computational Sciences and Optimization (CSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2012/4976/0/06394998",
"title": "A Novel Methodology for Stock Investment Using Episode Mining and Technical Indicators",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2012/06394998/12OmNyeECAt",
"parentPublication": {
"id": "proceedings/taai/2012/4976/0",
"title": "2012 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciii/2009/3876/2/3876b046",
"title": "Application of Gray-Situation Decision to the Investment of Real Estate and Stock",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2009/3876b046/12OmNywfKxL",
"parentPublication": {
"id": "proceedings/iciii/2009/3876/2",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciii/2010/4279/1/4279a225",
"title": "A Research on the Long-term Investment Value of the Stock in SME Board in China",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2010/4279a225/12OmNzRqdJu",
"parentPublication": {
"id": "proceedings/iciii/2010/4279/1",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2016/0984/0/0984a551",
"title": "The Impact of Stock Prices, Risks and Business Cycle on Overconfidence",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2016/0984a551/12OmNzcPAhQ",
"parentPublication": {
"id": "proceedings/imis/2016/0984/0",
"title": "2016 10th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258435",
"title": "An augmented fama and french three-factor model using social interaction",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258435/17D45VN31go",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2018/7449/0/744900a275",
"title": "A Stock-Movement Aware Approach for Discovering Investors' Personalized Preferences in Stock Markets",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2018/744900a275/17D45VTRoxt",
"parentPublication": {
"id": "proceedings/ictai/2018/7449/0",
"title": "2018 IEEE 30th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbk/2018/9125/0/912500a041",
"title": "Deep Co-Investment Network Learning for Financial Assets",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2018/912500a041/17D45XeKgua",
"parentPublication": {
"id": "proceedings/icbk/2018/9125/0",
"title": "2018 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09904431",
"articleId": "1H0GdxnVnws",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09905473",
"articleId": "1H2lfN8dsT6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1H2l4qWfK7K",
"name": "ttg555501-09904441s1-supp2-3209414.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904441s1-supp2-3209414.mp4",
"extension": "mp4",
"size": "81.8 MB",
"__typename": "WebExtraType"
},
{
"id": "1H2l5dnpOik",
"name": "ttg555501-09904441s1-supp1-3209414.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904441s1-supp1-3209414.pdf",
"extension": "pdf",
"size": "1.21 MB",
"__typename": "WebExtraType"
},
{
"id": "1H2l54Ntwxq",
"name": "ttg555501-09904441s1-supp3-3209414.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09904441s1-supp3-3209414.mp4",
"extension": "mp4",
"size": "23.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GNprsVfaFi",
"doi": "10.1109/TVCG.2022.3207929",
"abstract": "Current work on using visual analytics to determine causal relations among variables has mostly been based on the concept of counterfactuals. As such the derived static causal networks do not take into account the effect of time as an indicator. However, knowing the time delay of a causal relation can be crucial as it instructs how and when actions should be taken. Yet, similar to static causality, deriving causal relations from observational time-series data, as opposed to designed experiments, is not a straightforward process. It can greatly benefit from human insight to break ties and resolve errors. We hence propose a set of visual analytics methods that allow humans to participate in the discovery of causal relations associated with windows of time delay. Specifically, we leverage a well-established method, logic-based causality, to enable analysts to test the significance of potential causes and measure their influences toward a certain effect. Furthermore, since an effect can be a cause of other effects, we allow users to aggregate different temporal cause-effect relations found with our method into a visual flow diagram to enable the discovery of temporal causal networks. To demonstrate the effectiveness of our methods we constructed a prototype system named DOMINO and showcase it via a number of case studies using real-world datasets. Finally, we also used DOMINO to conduct several evaluations with human analysts from different science domains in order to gain feedback on the utility of our system in practical scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Current work on using visual analytics to determine causal relations among variables has mostly been based on the concept of counterfactuals. As such the derived static causal networks do not take into account the effect of time as an indicator. However, knowing the time delay of a causal relation can be crucial as it instructs how and when actions should be taken. Yet, similar to static causality, deriving causal relations from observational time-series data, as opposed to designed experiments, is not a straightforward process. It can greatly benefit from human insight to break ties and resolve errors. We hence propose a set of visual analytics methods that allow humans to participate in the discovery of causal relations associated with windows of time delay. Specifically, we leverage a well-established method, logic-based causality, to enable analysts to test the significance of potential causes and measure their influences toward a certain effect. Furthermore, since an effect can be a cause of other effects, we allow users to aggregate different temporal cause-effect relations found with our method into a visual flow diagram to enable the discovery of temporal causal networks. To demonstrate the effectiveness of our methods we constructed a prototype system named DOMINO and showcase it via a number of case studies using real-world datasets. Finally, we also used DOMINO to conduct several evaluations with human analysts from different science domains in order to gain feedback on the utility of our system in practical scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Current work on using visual analytics to determine causal relations among variables has mostly been based on the concept of counterfactuals. As such the derived static causal networks do not take into account the effect of time as an indicator. However, knowing the time delay of a causal relation can be crucial as it instructs how and when actions should be taken. Yet, similar to static causality, deriving causal relations from observational time-series data, as opposed to designed experiments, is not a straightforward process. It can greatly benefit from human insight to break ties and resolve errors. We hence propose a set of visual analytics methods that allow humans to participate in the discovery of causal relations associated with windows of time delay. Specifically, we leverage a well-established method, logic-based causality, to enable analysts to test the significance of potential causes and measure their influences toward a certain effect. Furthermore, since an effect can be a cause of other effects, we allow users to aggregate different temporal cause-effect relations found with our method into a visual flow diagram to enable the discovery of temporal causal networks. To demonstrate the effectiveness of our methods we constructed a prototype system named DOMINO and showcase it via a number of case studies using real-world datasets. Finally, we also used DOMINO to conduct several evaluations with human analysts from different science domains in order to gain feedback on the utility of our system in practical scenarios.",
"title": "DOMINO: Visual Causal Reasoning With Time-Dependent Phenomena",
"normalizedTitle": "DOMINO: Visual Causal Reasoning With Time-Dependent Phenomena",
"fno": "09895311",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Time Series Analysis",
"Delay Effects",
"Task Analysis",
"Visual Systems",
"Testing",
"Prototypes",
"Causality Analysis",
"Hypothesis Generation",
"Hypothesis Testing",
"Time Series",
"Visual Analytics"
],
"authors": [
{
"givenName": "Jun",
"surname": "Wang",
"fullName": "Jun Wang",
"affiliation": "Visual Analytics and Imaging Lab at the Computer Science Department, Stony Brook University, Stony Brook, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Visual Analytics and Imaging Lab at the Computer Science Department, Stony Brook University, Stony Brook, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2011/348/0/06011916",
"title": "Causal flow",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011916/12OmNxxvANK",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2002/1849/0/18490375",
"title": "TimeSleuth: A Tool for Discovering Causal and Temporal Rules",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2002/18490375/12OmNyrIaJZ",
"parentPublication": {
"id": "proceedings/ictai/2002/1849/0",
"title": "14th IEEE International Conference on Tools with Artificial Intelligence, 2002. (ICTAI 2002). Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585647",
"title": "Visual Causality Analysis Made Practical",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585647/17D45VTRouU",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2018/04/07970191",
"title": "pg-Causality: Identifying Spatiotemporal Causal Pathways for Air Pollutants with Urban Big Data",
"doi": null,
"abstractUrl": "/journal/bd/2018/04/07970191/17D45Xq6dD1",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2022/01/09706608",
"title": "CausalKG: Causal Knowledge Graph Explainability Using Interventional and Counterfactual Reasoning",
"doi": null,
"abstractUrl": "/magazine/ic/2022/01/09706608/1APlAUMvn4A",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ickg/2020/8156/0/09194510",
"title": "Causal Extraction from the Literature of Pressure Injury and Risk Factors",
"doi": null,
"abstractUrl": "/proceedings-article/ickg/2020/09194510/1n2nkO9teN2",
"parentPublication": {
"id": "proceedings/ickg/2020/8156/0",
"title": "2020 IEEE International Conference on Knowledge Graph (ICKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216629",
"title": "A Visual Analytics Approach for Exploratory Causal Analysis: Exploration, Validation, and Applications",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216629/1nJsGFc8lUY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222294",
"title": "Visual Causality Analysis of Event Sequence Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222294/1nTqOCPOdTq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a972",
"title": "Inductive Granger Causal Modeling for Multivariate Time Series",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a972/1r54F2RY7qE",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557222",
"title": "Compass: Towards Better Causal Analysis of Urban Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557222/1xlvZ1jrwLC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09894081",
"articleId": "1GIqtQDhf8I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904431",
"articleId": "1H0GdxnVnws",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GP3KtbWMRG",
"name": "ttg555501-09895311s1-supp1-3207929.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09895311s1-supp1-3207929.mp4",
"extension": "mp4",
"size": "30.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GIqrCx8RCE",
"doi": "10.1109/TVCG.2022.3207157",
"abstract": "Continuous locomotion in VR provides uninterrupted optical flow, which mimics real-world locomotion and supports path integration. However, optical flow limits the maximum speed and acceleration that can be effectively used without inducing cybersickness. In contrast, teleportation provides neither optical flow nor acceleration cues, and users can jump to any length without increasing cybersickness. However, teleportation cannot support continuous spatial updating and can increase disorientation. Thus, we designed ‘HyperJump’ in an attempt to merge benefits from continuous locomotion and teleportation. HyperJump adds iterative jumps every half a second on top of the continuous movement and was hypothesized to facilitate faster travel without compromising spatial awareness/orientation. In a user study, Participants travelled around a naturalistic virtual city with and without HyperJump (equivalent maximum speed). They followed waypoints to new landmarks, stopped near them and pointed back to all previously visited landmarks in random order. HyperJump was added to two continuous locomotion interfaces (controller- and leaning-based). Participants had better spatial awareness/orientation with leaning-based interfaces compared to controller-based (assessed via rapid pointing). With HyperJump, participants travelled significantly faster, while staying on the desired course without impairing their spatial knowledge. This provides evidence that optical flow can be effectively limited such that it facilitates faster travel without compromising spatial orientation. In future design iterations, we plan to utilize audio-visual effects to support jumping metaphors that help users better anticipate and interpret jumps, and use much larger virtual environments requiring faster speeds, where cybersickness will become increasingly prevalent and thus teleporting will become more important.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Continuous locomotion in VR provides uninterrupted optical flow, which mimics real-world locomotion and supports path integration. However, optical flow limits the maximum speed and acceleration that can be effectively used without inducing cybersickness. In contrast, teleportation provides neither optical flow nor acceleration cues, and users can jump to any length without increasing cybersickness. However, teleportation cannot support continuous spatial updating and can increase disorientation. Thus, we designed ‘HyperJump’ in an attempt to merge benefits from continuous locomotion and teleportation. HyperJump adds iterative jumps every half a second on top of the continuous movement and was hypothesized to facilitate faster travel without compromising spatial awareness/orientation. In a user study, Participants travelled around a naturalistic virtual city with and without HyperJump (equivalent maximum speed). They followed waypoints to new landmarks, stopped near them and pointed back to all previously visited landmarks in random order. HyperJump was added to two continuous locomotion interfaces (controller- and leaning-based). Participants had better spatial awareness/orientation with leaning-based interfaces compared to controller-based (assessed via rapid pointing). With HyperJump, participants travelled significantly faster, while staying on the desired course without impairing their spatial knowledge. This provides evidence that optical flow can be effectively limited such that it facilitates faster travel without compromising spatial orientation. In future design iterations, we plan to utilize audio-visual effects to support jumping metaphors that help users better anticipate and interpret jumps, and use much larger virtual environments requiring faster speeds, where cybersickness will become increasingly prevalent and thus teleporting will become more important.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Continuous locomotion in VR provides uninterrupted optical flow, which mimics real-world locomotion and supports path integration. However, optical flow limits the maximum speed and acceleration that can be effectively used without inducing cybersickness. In contrast, teleportation provides neither optical flow nor acceleration cues, and users can jump to any length without increasing cybersickness. However, teleportation cannot support continuous spatial updating and can increase disorientation. Thus, we designed ‘HyperJump’ in an attempt to merge benefits from continuous locomotion and teleportation. HyperJump adds iterative jumps every half a second on top of the continuous movement and was hypothesized to facilitate faster travel without compromising spatial awareness/orientation. In a user study, Participants travelled around a naturalistic virtual city with and without HyperJump (equivalent maximum speed). They followed waypoints to new landmarks, stopped near them and pointed back to all previously visited landmarks in random order. HyperJump was added to two continuous locomotion interfaces (controller- and leaning-based). Participants had better spatial awareness/orientation with leaning-based interfaces compared to controller-based (assessed via rapid pointing). With HyperJump, participants travelled significantly faster, while staying on the desired course without impairing their spatial knowledge. This provides evidence that optical flow can be effectively limited such that it facilitates faster travel without compromising spatial orientation. In future design iterations, we plan to utilize audio-visual effects to support jumping metaphors that help users better anticipate and interpret jumps, and use much larger virtual environments requiring faster speeds, where cybersickness will become increasingly prevalent and thus teleporting will become more important.",
"title": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless ‘HyperJump’ Paradigm",
"normalizedTitle": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless ‘HyperJump’ Paradigm",
"fno": "09894041",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Teleportation",
"Cybersickness",
"Optical Flow",
"Task Analysis",
"Navigation",
"Animation",
"Virtual Environments",
"Leaning",
"Locomotion",
"Semi Continuous Locomotion",
"Spatial Updating",
"Teleportation",
"Virtual Reality"
],
"authors": [
{
"givenName": "Ashu",
"surname": "Adhikari",
"fullName": "Ashu Adhikari",
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Zielasko",
"fullName": "Daniel Zielasko",
"affiliation": "University of Trier, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ivan",
"surname": "Aguilar",
"fullName": "Ivan Aguilar",
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Bretin",
"fullName": "Alexander Bretin",
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ernst",
"surname": "Kruijff",
"fullName": "Ernst Kruijff",
"affiliation": "Institute of Visual Computing, Bonn-Rhein-Sieg University of Applied Sciences, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus von der",
"surname": "Heyde",
"fullName": "Markus von der Heyde",
"affiliation": "vdH-IT and the School of Interactive Arts & Technology, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bernhard E.",
"surname": "Riecke",
"fullName": "Bernhard E. Riecke",
"affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493414",
"title": "Comparison of Teleportation and Fixed Track Driving in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493414/14tNJnrhcIw",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a693",
"title": "Systematic Design Space Exploration of Discrete Virtual Rotations in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a693/1CJbHGJZxeM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a794",
"title": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a794/1J7We4du3FC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a501",
"title": "Exploring Three-Dimensional Locomotion Techniques in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a501/1J7WrBbMYEg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a530",
"title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090536",
"title": "Elastic-Move: Passive Haptic Device with Force Feedback for Virtual Reality Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a373",
"title": "Continuous vs. Discontinuous (Teleport) Locomotion in VR: How Implications can Provide both Benefits and Disadvantages",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a373/1tnWQrR1hAs",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a393",
"title": "Effects of a handlebar on standing VR locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a393/1tnX2vv1TS8",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09894103",
"articleId": "1GIqpPbyH7y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09894081",
"articleId": "1GIqtQDhf8I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GNpstTVfWM",
"name": "ttg555501-09894041s1-supp1-3207157.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09894041s1-supp1-3207157.mp4",
"extension": "mp4",
"size": "53.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GIqtQDhf8I",
"doi": "10.1109/TVCG.2022.3207241",
"abstract": "What happens if we put vision and touch into conflict? Which modality “wins”? Although several previous studies have addressed this topic, they have solely focused on integration of vision and touch for low-level object properties (such as curvature, slant, or depth). In the present study, we introduce a multimodal mixed-reality setup based on real-time hand-tracking, which was used to display real-world, haptic exploration of objects in a virtual environment through a head-mounted-display (HMD). With this setup we studied multimodal conflict situations of objects varying along higher-level, parametrically-controlled global shape properties. Participants explored these objects in both unimodal and multimodal settings with the latter including congruent and incongruent conditions and differing instructions for weighting the input modalities. Results demonstrated a surprisingly clear touch dominance throughout all experiments, which in addition was only marginally influenceable through instructions to bias their modality weighting. We also present an initial analysis of the hand-tracking patterns that illustrates the potential for our setup to investigate exploration behavior in more detail.",
"abstracts": [
{
"abstractType": "Regular",
"content": "What happens if we put vision and touch into conflict? Which modality “wins”? Although several previous studies have addressed this topic, they have solely focused on integration of vision and touch for low-level object properties (such as curvature, slant, or depth). In the present study, we introduce a multimodal mixed-reality setup based on real-time hand-tracking, which was used to display real-world, haptic exploration of objects in a virtual environment through a head-mounted-display (HMD). With this setup we studied multimodal conflict situations of objects varying along higher-level, parametrically-controlled global shape properties. Participants explored these objects in both unimodal and multimodal settings with the latter including congruent and incongruent conditions and differing instructions for weighting the input modalities. Results demonstrated a surprisingly clear touch dominance throughout all experiments, which in addition was only marginally influenceable through instructions to bias their modality weighting. We also present an initial analysis of the hand-tracking patterns that illustrates the potential for our setup to investigate exploration behavior in more detail.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "What happens if we put vision and touch into conflict? Which modality “wins”? Although several previous studies have addressed this topic, they have solely focused on integration of vision and touch for low-level object properties (such as curvature, slant, or depth). In the present study, we introduce a multimodal mixed-reality setup based on real-time hand-tracking, which was used to display real-world, haptic exploration of objects in a virtual environment through a head-mounted-display (HMD). With this setup we studied multimodal conflict situations of objects varying along higher-level, parametrically-controlled global shape properties. Participants explored these objects in both unimodal and multimodal settings with the latter including congruent and incongruent conditions and differing instructions for weighting the input modalities. Results demonstrated a surprisingly clear touch dominance throughout all experiments, which in addition was only marginally influenceable through instructions to bias their modality weighting. We also present an initial analysis of the hand-tracking patterns that illustrates the potential for our setup to investigate exploration behavior in more detail.",
"title": "Putting Vision and Touch Into Conflict: Results from a Multimodal Mixed Reality Setup",
"normalizedTitle": "Putting Vision and Touch Into Conflict: Results from a Multimodal Mixed Reality Setup",
"fno": "09894081",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Visualization",
"Haptic Interfaces",
"Resists",
"Task Analysis",
"Real Time Systems",
"Mixed Reality",
"Perception",
"Vision",
"Haptics",
"Dominance",
"Hand Tracking",
"Multisensory Perception"
],
"authors": [
{
"givenName": "Hyeokmook",
"surname": "Kang",
"fullName": "Hyeokmook Kang",
"affiliation": "Department of Brain and Cognitive Engineering, Korea University, Seoul, Republic of Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Taeho",
"surname": "Kang",
"fullName": "Taeho Kang",
"affiliation": "Department of Brain and Cognitive Engineering, Korea University, Seoul, Republic of Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Wallraven",
"fullName": "Christian Wallraven",
"affiliation": "Department of Brain and Cognitive Engineering and Department of Artificial Intelligence, Korea University, Seoul, Republic of Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a701",
"title": "Affective Touch at a Distance",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a701/12OmNAle6rO",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890097",
"title": "Integrating Multimodal Information about Surface Texture via a Probe: Relative Contributions of Haptic and Touch-Produced Sound Sources",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890097/12OmNBBzohw",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811019",
"title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811054",
"title": "A Multimodal Interface for Artifact's Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811054/12OmNx4yvAp",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811061",
"title": "A VR Multimodal Interface for Small Artifacts in the Gold Museum",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811061/12OmNx76TSt",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549376",
"title": "Touch experience in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549376/12OmNy2agRt",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/03/07811300",
"title": "Social Touch Technology: A Survey of Haptic Technology for Social Touch",
"doi": null,
"abstractUrl": "/journal/th/2017/03/07811300/13rRUxNW1TZ",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/01/07360126",
"title": "Physiological Responses to Affective Tele-Touch during Induced Emotional Stimuli",
"doi": null,
"abstractUrl": "/journal/ta/2017/01/07360126/13rRUxYINdD",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/04/07752812",
"title": "Combining Facial Expression and Touch for Perceiving Emotional Valence",
"doi": null,
"abstractUrl": "/journal/ta/2018/04/07752812/17D45XeKgr8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a102",
"title": "Studying the Role of Self and External Touch in the Appropriation of Dysmorphic Hands",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a102/1JrRiazvJ1m",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09894041",
"articleId": "1GIqrCx8RCE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09895311",
"articleId": "1GNprsVfaFi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GIqpC6j7na",
"doi": "10.1109/TVCG.2022.3207357",
"abstract": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"normalizedTitle": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"fno": "09894094",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Data Visualization",
"Cognition",
"Prototypes",
"Visual Analytics",
"Three Dimensional Displays",
"Keyboards",
"Human Computer Interaction",
"Immersive Analytics",
"Virtual Reality",
"Information Visualization",
"Sensemaking"
],
"authors": [
{
"givenName": "Kylie",
"surname": "Davidson",
"fullName": "Kylie Davidson",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lee",
"surname": "Lisle",
"fullName": "Lee Lisle",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kirsten",
"surname": "Whitley",
"fullName": "Kirsten Whitley",
"affiliation": "US Department of Defense, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug A.",
"surname": "Bowman",
"fullName": "Doug A. Bowman",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "North",
"fullName": "Chris North",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400559",
"title": "Analyst's Workspace: An embodied sensemaking environment for large, high-resolution displays",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400559/12OmNwF0BJt",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2016/2857/0/07870192",
"title": "How Analysts Think: Think-steps as a Tool for Structuring Sensemaking in Criminal Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2016/07870192/12OmNxWLTzQ",
"parentPublication": {
"id": "proceedings/eisic/2016/2857/0",
"title": "2016 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122879",
"title": "Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122879/13rRUwdIOUL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090546",
"title": "[DC] The Immersive Space to Think: Immersive Analytics for Multimedia Data",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090546/1jIxrquhCNO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090620",
"title": "Evaluating the Benefits of the Immersive Space to Think",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090620/1jIxs5S1PwY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382916",
"title": "Exploring the SenseMaking Process through Interactions and fNIRS in Immersive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382916/1saZna718yY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a529",
"title": "Sensemaking Strategies with Immersive Space to Think",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a529/1tuAMAuN6kU",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09893374",
"articleId": "1GGLIh8KmSA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09894103",
"articleId": "1GIqpPbyH7y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GNprfMS0uc",
"name": "ttg555501-09894094s1-supp1-3207357.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09894094s1-supp1-3207357.pdf",
"extension": "pdf",
"size": "33.4 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GIqpPbyH7y",
"doi": "10.1109/TVCG.2022.3207147",
"abstract": "Lineup selection is an essential and important task in soccer matches. To win a match, coaches must consider various factors and select appropriate players for a planned formation. Computation-based tools have been proposed to help coaches on this complex task, but they are usually based on over-simplified models on player performances, do not support interactive analysis, and overlook the inputs by coaches. In this paper, we propose a method for visual analytics of soccer lineup selection by tackling two challenges: characterizing essential factors involved in generating optimal lineup, and supporting coach-driven visual analytics of lineup selection. We develop a lineup selection model that integrates such important factors, such as spatial regions of player actions and defensive interactions with opponent players. A visualization system, Team-Builder, is developed to help coaches control the process of lineup generation, explanation, and comparison through multiple coordinated views. The usefulness and effectiveness of our system are demonstrated by two case studies on a real-world soccer event dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Lineup selection is an essential and important task in soccer matches. To win a match, coaches must consider various factors and select appropriate players for a planned formation. Computation-based tools have been proposed to help coaches on this complex task, but they are usually based on over-simplified models on player performances, do not support interactive analysis, and overlook the inputs by coaches. In this paper, we propose a method for visual analytics of soccer lineup selection by tackling two challenges: characterizing essential factors involved in generating optimal lineup, and supporting coach-driven visual analytics of lineup selection. We develop a lineup selection model that integrates such important factors, such as spatial regions of player actions and defensive interactions with opponent players. A visualization system, Team-Builder, is developed to help coaches control the process of lineup generation, explanation, and comparison through multiple coordinated views. The usefulness and effectiveness of our system are demonstrated by two case studies on a real-world soccer event dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Lineup selection is an essential and important task in soccer matches. To win a match, coaches must consider various factors and select appropriate players for a planned formation. Computation-based tools have been proposed to help coaches on this complex task, but they are usually based on over-simplified models on player performances, do not support interactive analysis, and overlook the inputs by coaches. In this paper, we propose a method for visual analytics of soccer lineup selection by tackling two challenges: characterizing essential factors involved in generating optimal lineup, and supporting coach-driven visual analytics of lineup selection. We develop a lineup selection model that integrates such important factors, such as spatial regions of player actions and defensive interactions with opponent players. A visualization system, Team-Builder, is developed to help coaches control the process of lineup generation, explanation, and comparison through multiple coordinated views. The usefulness and effectiveness of our system are demonstrated by two case studies on a real-world soccer event dataset.",
"title": "Team-Builder: Toward More Effective Lineup Selection in Soccer",
"normalizedTitle": "Team-Builder: Toward More Effective Lineup Selection in Soccer",
"fno": "09894103",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sports",
"Data Visualization",
"Analytical Models",
"Trajectory",
"Computational Modeling",
"Visual Analytics",
"Videos",
"Design Study",
"Lineup Selection",
"Sports Visualization"
],
"authors": [
{
"givenName": "Anqi",
"surname": "Cao",
"fullName": "Anqi Cao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ji",
"surname": "Lan",
"fullName": "Ji Lan",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiao",
"surname": "Xie",
"fullName": "Xiao Xie",
"affiliation": "Department of Sports Science, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongyu",
"surname": "Chen",
"fullName": "Hongyu Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaolong",
"surname": "Zhang",
"fullName": "Xiaolong Zhang",
"affiliation": "College of Information Sciences and Technology, Pennsylvania State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Zhang",
"fullName": "Hui Zhang",
"affiliation": "Department of Sports Science, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042477",
"title": "Feature-driven visual analytics of soccer data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042477/12OmNxcMSiK",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a774",
"title": "Injury Mechanism Classification in Soccer Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a774/12OmNyQYtu5",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2016/05/mcg2016050050",
"title": "Director's Cut: Analysis and Annotation of Soccer Matches",
"doi": null,
"abstractUrl": "/magazine/cg/2016/05/mcg2016050050/13rRUIJcWr3",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2016/10/07492601",
"title": "Discovering Team Structures in Soccer from Spatiotemporal Data",
"doi": null,
"abstractUrl": "/journal/tk/2016/10/07492601/13rRUwjoNxy",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b830",
"title": "Soccer: Who Has the Ball? Generating Visual Analytics and Player Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b830/17D45VObpOM",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534022",
"title": "Revealing the Invisible: Visual Analytics and Explanatory Storytelling for Advanced Team Sport Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534022/17D45WODasQ",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440804",
"title": "ForVizor: Visualizing Spatio-Temporal Team Formations in Soccer",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440804/17D45WXIkAs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/05/08735757",
"title": "Tackling Similarity Search for Soccer Match Analysis: Multimodal Distance Measure and Interactive Query Definition",
"doi": null,
"abstractUrl": "/magazine/cg/2019/05/08735757/1aNOsqnK0Gk",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08795584",
"title": "CourtTime: Generating Actionable Insights into Tennis Matches Using Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08795584/1csHUeq7TB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222314",
"title": "PassVizor: Toward Better Understanding of the Dynamics of Soccer Passes",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222314/1nTq1FTwhtC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09894094",
"articleId": "1GIqpC6j7na",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09894041",
"articleId": "1GIqrCx8RCE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GQIKYT1TnG",
"name": "ttg555501-09894103s1-supp1-3207147.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09894103s1-supp1-3207147.mp4",
"extension": "mp4",
"size": "14.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1GQIKRomxs4",
"name": "ttg555501-09894103s1-supp2-3207147.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09894103s1-supp2-3207147.pdf",
"extension": "pdf",
"size": "269 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
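To make the kind of model described in the Team-Builder abstract concrete, here is a minimal, hypothetical sketch of lineup selection as combinatorial scoring: each candidate lineup is scored by summing per-player fitness for the planned formation plus pairwise interaction terms, then ranked exhaustively. The player names, fitness values, and synergy terms are invented for illustration; this is not the paper's actual model, which also accounts for spatial regions of player actions and opponent defense.

```python
# Hypothetical sketch only: all scores and synergies below are invented.
from itertools import combinations

# Per-player fitness for a slot in the planned formation (invented numbers).
fitness = {"A": 7.5, "B": 6.8, "C": 8.1, "D": 5.9, "E": 7.0}

# Pairwise synergy between players, e.g., passing chemistry (invented).
synergy = {("A", "C"): 1.2, ("B", "E"): 0.8, ("C", "D"): -0.5}

def lineup_score(lineup):
    """Score = sum of individual fitness + sum of pairwise synergies."""
    score = sum(fitness[p] for p in lineup)
    for p, q in combinations(sorted(lineup), 2):
        score += synergy.get((p, q), 0.0)
    return score

# Exhaustively rank all 3-player lineups (feasible for small squads; a real
# system would prune the search or let the coach steer it interactively).
best = max(combinations(fitness, 3), key=lineup_score)
print(best, round(lineup_score(best), 2))
```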
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GGLIh8KmSA",
"doi": "10.1109/TVCG.2022.3207004",
"abstract": "Redirected walking (RDW) enables users to explore large virtual spaces by real walking in small real spaces. How to effectively reduce physical collisions and decrease user perceptions of redirection are important for most RDW methods. This paper proposes a segmented redirection mapping method to calculate and map the roadmap of a large virtual space with inner obstacles to a mapped roadmap within a small real space. We adopt a Voronoi-based pruning method to extract the roadmap of the virtual space and design an RDW platform to interactively modify the virtual roadmap. We propose a roadmap mapping method based on divide-and-conquer and dynamic planning strategies to subdivide the virtual roadmap into several sub-virtual roads that are mapped individually. By recording connections of different sub-virtual roads, our method is applicable to virtual roadmaps with loop structures. During mapping, we apply the reset and redirection gains of the RDW technique as optimal aims and restrict conditions to obtain the mapped roadmap, which has small path curving and contains as few resets as possible. By real walking along the mapped roadmap, users perceive moving along the virtual roadmap to explore the entire virtual space. The experiment shows that our method works effectively for various virtual spaces with or without inner obstacles. Furthermore, our method is flexible in obtaining mapped roadmaps of different real spaces when the virtual space is fixed. Compared to prevalent RDW methods, our method can significantly reduce physical boundary collisions and maintain user experience of virtual roaming.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) enables users to explore large virtual spaces by real walking in small real spaces. How to effectively reduce physical collisions and decrease user perceptions of redirection are important for most RDW methods. This paper proposes a segmented redirection mapping method to calculate and map the roadmap of a large virtual space with inner obstacles to a mapped roadmap within a small real space. We adopt a Voronoi-based pruning method to extract the roadmap of the virtual space and design an RDW platform to interactively modify the virtual roadmap. We propose a roadmap mapping method based on divide-and-conquer and dynamic planning strategies to subdivide the virtual roadmap into several sub-virtual roads that are mapped individually. By recording connections of different sub-virtual roads, our method is applicable to virtual roadmaps with loop structures. During mapping, we apply the reset and redirection gains of the RDW technique as optimal aims and restrict conditions to obtain the mapped roadmap, which has small path curving and contains as few resets as possible. By real walking along the mapped roadmap, users perceive moving along the virtual roadmap to explore the entire virtual space. The experiment shows that our method works effectively for various virtual spaces with or without inner obstacles. Furthermore, our method is flexible in obtaining mapped roadmaps of different real spaces when the virtual space is fixed. Compared to prevalent RDW methods, our method can significantly reduce physical boundary collisions and maintain user experience of virtual roaming.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) enables users to explore large virtual spaces by real walking in small real spaces. How to effectively reduce physical collisions and decrease user perceptions of redirection are important for most RDW methods. This paper proposes a segmented redirection mapping method to calculate and map the roadmap of a large virtual space with inner obstacles to a mapped roadmap within a small real space. We adopt a Voronoi-based pruning method to extract the roadmap of the virtual space and design an RDW platform to interactively modify the virtual roadmap. We propose a roadmap mapping method based on divide-and-conquer and dynamic planning strategies to subdivide the virtual roadmap into several sub-virtual roads that are mapped individually. By recording connections of different sub-virtual roads, our method is applicable to virtual roadmaps with loop structures. During mapping, we apply the reset and redirection gains of the RDW technique as optimal aims and restrict conditions to obtain the mapped roadmap, which has small path curving and contains as few resets as possible. By real walking along the mapped roadmap, users perceive moving along the virtual roadmap to explore the entire virtual space. The experiment shows that our method works effectively for various virtual spaces with or without inner obstacles. Furthermore, our method is flexible in obtaining mapped roadmaps of different real spaces when the virtual space is fixed. Compared to prevalent RDW methods, our method can significantly reduce physical boundary collisions and maintain user experience of virtual roaming.",
"title": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments",
"normalizedTitle": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments",
"fno": "09893374",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Roads",
"Planning",
"Navigation",
"Space Exploration",
"Virtual Environments",
"Visualization",
"Dynamic Planning",
"Real Walking",
"Redirected Walking",
"Virtual Reality",
"Voronoi Diagram"
],
"authors": [
{
"givenName": "Huiyu",
"surname": "Li",
"fullName": "Huiyu Li",
"affiliation": "Shandong University of Finance and Economics, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Linwei",
"surname": "Fan",
"fullName": "Linwei Fan",
"affiliation": "Shandong University of Finance and Economics, Jinan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460038",
"title": "Curvature manipulation techniques in redirection using haptic cues",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460038/12OmNxTVU2T",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715723",
"title": "Adaptive Redirection: A Context-Aware Redirected Walking Meta-Strategy",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715723/1B4hxCQXB4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a524",
"title": "The Chaotic Behavior of Redirection – Revisiting Simulations in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a167",
"title": "Foldable Spaces: An Overt Redirection Approach for Natural Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a167/1CJc5J6RYYM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09785918",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a554",
"title": "Short-term Path Prediction for Spontaneous Human Locomotion in Arbitrary Virtual Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a554/1J7WabiAcYE",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090521",
"title": "A Constrained Path Redirection for Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090521/1jIxpAQuq8o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090671",
"title": "The Influence of Full-Body Representation on Translation and Curvature Gain",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090671/1jIxqcIwi64",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09893375",
"articleId": "1GGLG3PqZJC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09894094",
"articleId": "1GIqpC6j7na",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
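The reset mechanic that the segmented mapping method tries to minimize can be illustrated with a toy simulation: walk a long straight virtual path inside a small physical room and reorient the walker toward the room center (one reset) whenever the next stride would cross the physical boundary. This is only a rough sketch of the cost being optimized, not the paper's Voronoi-based or divide-and-conquer algorithm; the room size, stride length, and path length below are invented.

```python
# Toy reset counter for redirected walking; all constants are invented.
import math

ROOM = 4.0   # half-extent of a square physical room, in meters (invented)
STEP = 0.25  # simulated stride length, in meters (invented)

def walk_with_resets(total_virtual_length):
    x = y = 0.0      # start at the room center
    heading = 0.0    # walking direction in radians
    walked, resets = 0.0, 0
    while walked < total_virtual_length:
        nx = x + STEP * math.cos(heading)
        ny = y + STEP * math.sin(heading)
        if abs(nx) > ROOM or abs(ny) > ROOM:
            # Reset: turn the walker to face the room center, then continue.
            heading = math.atan2(-y, -x)
            resets += 1
            continue
        x, y, walked = nx, ny, walked + STEP
    return resets

# A 200 m straight virtual walk inside an 8 m x 8 m room:
print(walk_with_resets(200.0))  # every boundary hit costs one reset
```

A mapped roadmap with small path curvature trades some of these explicit resets for gentler, less perceptible redirection, which is the balance the gains-based optimization in the paper targets.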
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GGLG3PqZJC",
"doi": "10.1109/TVCG.2022.3206915",
"abstract": "The change of the user's viewpoint in an immersive virtual environment, called locomotion, is one of the key components in a virtual reality interface. Effects of locomotion, such as simulator sickness or disorientation, depend on the specific design of the locomotion method and can influence the task performance as well as the overall acceptance of the virtual reality system. Thus, it is important that a locomotion method achieves the intended effects. The complexity of this task has increased with the growing number of locomotion methods and design choices in recent years. Locomotion taxonomies are classification schemes that group multiple locomotion methods and can aid in the design and selection of locomotion methods. Like locomotion methods themselves, there exist multiple locomotion taxonomies, each with a different focus and, consequently, a different possible outcome. However, there is little research that focuses on locomotion taxonomies. We performed a systematic literature review to provide an overview of possible locomotion taxonomies and analysis of possible decision criteria such as impact, common elements, and use cases for locomotion taxonomies. We aim to support future research on the design, choice, and evaluation of locomotion taxonomies and thereby support future research on virtual reality locomotion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The change of the user's viewpoint in an immersive virtual environment, called locomotion, is one of the key components in a virtual reality interface. Effects of locomotion, such as simulator sickness or disorientation, depend on the specific design of the locomotion method and can influence the task performance as well as the overall acceptance of the virtual reality system. Thus, it is important that a locomotion method achieves the intended effects. The complexity of this task has increased with the growing number of locomotion methods and design choices in recent years. Locomotion taxonomies are classification schemes that group multiple locomotion methods and can aid in the design and selection of locomotion methods. Like locomotion methods themselves, there exist multiple locomotion taxonomies, each with a different focus and, consequently, a different possible outcome. However, there is little research that focuses on locomotion taxonomies. We performed a systematic literature review to provide an overview of possible locomotion taxonomies and analysis of possible decision criteria such as impact, common elements, and use cases for locomotion taxonomies. We aim to support future research on the design, choice, and evaluation of locomotion taxonomies and thereby support future research on virtual reality locomotion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The change of the user's viewpoint in an immersive virtual environment, called locomotion, is one of the key components in a virtual reality interface. Effects of locomotion, such as simulator sickness or disorientation, depend on the specific design of the locomotion method and can influence the task performance as well as the overall acceptance of the virtual reality system. Thus, it is important that a locomotion method achieves the intended effects. The complexity of this task has increased with the growing number of locomotion methods and design choices in recent years. Locomotion taxonomies are classification schemes that group multiple locomotion methods and can aid in the design and selection of locomotion methods. Like locomotion methods themselves, there exist multiple locomotion taxonomies, each with a different focus and, consequently, a different possible outcome. However, there is little research that focuses on locomotion taxonomies. We performed a systematic literature review to provide an overview of possible locomotion taxonomies and analysis of possible decision criteria such as impact, common elements, and use cases for locomotion taxonomies. We aim to support future research on the design, choice, and evaluation of locomotion taxonomies and thereby support future research on virtual reality locomotion.",
"title": "A Systematic Literature Review of Virtual Reality Locomotion Taxonomies",
"normalizedTitle": "A Systematic Literature Review of Virtual Reality Locomotion Taxonomies",
"fno": "09893375",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Taxonomy",
"Systematics",
"Bibliographies",
"Virtual Environments",
"Task Analysis",
"Planning",
"Databases",
"Classification",
"Immersive Virtual Environments",
"Locomotion",
"Semantic Similarity Computation",
"Survey",
"Systematic Literatur Review",
"Taxonomies",
"Travel",
"Virtual Reality"
],
"authors": [
{
"givenName": "Lisa Marie",
"surname": "Prinz",
"fullName": "Lisa Marie Prinz",
"affiliation": "Intelligent and Immersive Systems Group at Fraunhofer FKIE, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tintu",
"surname": "Mathew",
"fullName": "Tintu Mathew",
"affiliation": "Intelligent and Immersive Systems Group at Fraunhofer FKIE, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Benjamin",
"surname": "Weyers",
"fullName": "Benjamin Weyers",
"affiliation": "Human-Computer Interaction, Trier University, Trier, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2020/06/08580399",
"title": "Virtual Locomotion: A Survey",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08580399/17D45VUZMU0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a270",
"title": "Research Trends in Virtual Reality Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a270/1CJbIxglNqU",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a660",
"title": "Towards Conducting Effective Locomotion Through Hardware Transformation in Head-Mounted-Device - A Review Study",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a660/1CJcC7q0PRu",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a502",
"title": "Virtual Reality in Small and Medium-Sized Enterprises: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a502/1CJe1JPMyK4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a696",
"title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2022/8810/0/881000a545",
"title": "A Systematic Literature Review of Virtual and Augmented Reality Applications for Maintenance in Manufacturing",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2022/881000a545/1FJ5OxsS4Ba",
"parentPublication": {
"id": "proceedings/compsac/2022/8810/0",
"title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09930000",
"title": "A Systematic Literature Review of Virtual Reality-based Reminiscence Therapy for People with Cognitive Impairment or Dementia",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09930000/1HYuUJdaziU",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000a596",
"title": "A Systematic Literature Review of Indicators for the Understanding of Interactions in Virtual Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000a596/1gjRw0EJe4U",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a385",
"title": "An Overview and Analysis of Publications on Locomotion Taxonomies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a385/1tnXOcjEGJ2",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a431",
"title": "A Taxonomy of Interaction Techniques for Immersive Augmented Reality based on an Iterative Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a431/1yeD62B4zza",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09891797",
"articleId": "1GF6PmosQr6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09893374",
"articleId": "1GGLIh8KmSA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GF6PmosQr6",
"doi": "10.1109/TVCG.2022.3206579",
"abstract": "Simulating liquid-textile interaction has received great attention in computer graphics recently. Most existing methods take textiles as particles or parameterized meshes. Although these methods can generate visually pleasing results, they cannot simulate water content at a microscopic level due to the lack of geometrically modeling of textile's anisotropic structure. In this paper, we develop a method for yarn-level simulation of hygroscopicity of textiles and evaluate it using various quantitative metrics. We model textiles in a fiber-yarn-fabric multi-scale manner and consider the dynamic coupled physical mechanisms of liquid spreading, including wetting, wicking, moisture sorption/desorption, and transient moisture-heat transfer in textiles. Our method can accurately simulate liquid spreading on textiles with different fiber materials and geometrical structures with consideration of air temperatures and humidity conditions. It visualizes the hygroscopicity of textiles to demonstrate their moisture management ability. We conduct qualitative and quantitative experiments to validate our method and explore various factors to analyze their influence on liquid spreading and hygroscopicity of textiles.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simulating liquid-textile interaction has received great attention in computer graphics recently. Most existing methods take textiles as particles or parameterized meshes. Although these methods can generate visually pleasing results, they cannot simulate water content at a microscopic level due to the lack of geometrically modeling of textile's anisotropic structure. In this paper, we develop a method for yarn-level simulation of hygroscopicity of textiles and evaluate it using various quantitative metrics. We model textiles in a fiber-yarn-fabric multi-scale manner and consider the dynamic coupled physical mechanisms of liquid spreading, including wetting, wicking, moisture sorption/desorption, and transient moisture-heat transfer in textiles. Our method can accurately simulate liquid spreading on textiles with different fiber materials and geometrical structures with consideration of air temperatures and humidity conditions. It visualizes the hygroscopicity of textiles to demonstrate their moisture management ability. We conduct qualitative and quantitative experiments to validate our method and explore various factors to analyze their influence on liquid spreading and hygroscopicity of textiles.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simulating liquid-textile interaction has received great attention in computer graphics recently. Most existing methods take textiles as particles or parameterized meshes. Although these methods can generate visually pleasing results, they cannot simulate water content at a microscopic level due to the lack of geometrically modeling of textile's anisotropic structure. In this paper, we develop a method for yarn-level simulation of hygroscopicity of textiles and evaluate it using various quantitative metrics. We model textiles in a fiber-yarn-fabric multi-scale manner and consider the dynamic coupled physical mechanisms of liquid spreading, including wetting, wicking, moisture sorption/desorption, and transient moisture-heat transfer in textiles. Our method can accurately simulate liquid spreading on textiles with different fiber materials and geometrical structures with consideration of air temperatures and humidity conditions. It visualizes the hygroscopicity of textiles to demonstrate their moisture management ability. We conduct qualitative and quantitative experiments to validate our method and explore various factors to analyze their influence on liquid spreading and hygroscopicity of textiles.",
"title": "Yarn-Level Simulation of Hygroscopicity of Woven Textiles",
"normalizedTitle": "Yarn-Level Simulation of Hygroscopicity of Woven Textiles",
"fno": "09891797",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Textiles",
"Liquids",
"Computational Modeling",
"Yarn",
"Fluids",
"Fabrics",
"Visualization",
"Anisotropic Textile",
"Fluid Dynamics",
"Physical Models",
"Microscopic Simulation",
"Liquid Textile Interaction"
],
"authors": [
{
"givenName": "Aihua",
"surname": "Mao",
"fullName": "Aihua Mao",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenbo",
"surname": "Dong",
"fullName": "Wenbo Dong",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chaoqiang",
"surname": "Xie",
"fullName": "Chaoqiang Xie",
"affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Wang",
"fullName": "Huamin Wang",
"affiliation": "Chief Scientist at Style3D Research, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong-Jin",
"surname": "Liu",
"fullName": "Yong-Jin Liu",
"affiliation": "BNRist, Department of Computer Science and Technology, MOE-Key Laboratory of Pervasive Computing, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guiqing",
"surname": "Li",
"fullName": "Guiqing Li",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "He",
"fullName": "Ying He",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icbake/2011/4512/0/4512a049",
"title": "Fabric Weave Form Simulation Based on the Characteristics of Yarn",
"doi": null,
"abstractUrl": "/proceedings-article/icbake/2011/4512a049/12OmNAXxXg3",
"parentPublication": {
"id": "proceedings/icbake/2011/4512/0",
"title": "Biometrics and Kansei Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmlc/2010/6006/0/05460714",
"title": "An interactive tool for yarn strength prediction using support vector regression",
"doi": null,
"abstractUrl": "/proceedings-article/icmlc/2010/05460714/12OmNCctfdr",
"parentPublication": {
"id": "proceedings/icmlc/2010/6006/0",
"title": "2nd International Conference on Machine Learning and Computing (ICMLC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999977",
"title": "Stress Analysis of Flexible Packaging for the Integration of Electronic Components within Woven Textiles",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999977/12OmNqOwQBq",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2010/9046/0/05665874",
"title": "Weaving integrated circuits into textiles",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2010/05665874/12OmNrYCXTr",
"parentPublication": {
"id": "proceedings/iswc/2010/9046/0",
"title": "International Symposium on Wearable Computers (ISWC) 2010",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2006/0054/0/01639318",
"title": "A dependable infrastructure of the electric network for e-textiles",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2006/01639318/12OmNzDehb3",
"parentPublication": {
"id": "proceedings/ipdps/2006/0054/0",
"title": "Proceedings 20th IEEE International Parallel & Distributed Processing Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097909",
"title": "Transmission characteristics of hybrid structure yarns for e-textiles",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097909/12OmNzIUfY5",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2013/03/mpc2013030081",
"title": "Smart Textiles: From Niche to Mainstream",
"doi": null,
"abstractUrl": "/magazine/pc/2013/03/mpc2013030081/13rRUwInvir",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/03/ttg2013030420",
"title": "IDSS: A Novel Representation for Woven Fabrics",
"doi": null,
"abstractUrl": "/journal/tg/2013/03/ttg2013030420/13rRUxCitJb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/02/07516643",
"title": "Yarn-Level Cloth Simulation with Sliding Persistent Contacts",
"doi": null,
"abstractUrl": "/journal/tg/2017/02/07516643/13rRUxlgy3N",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08356670",
"title": "Simulation of Textile Stains",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08356670/13rRUzphDy3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09889219",
"articleId": "1GDryH066Lm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09893375",
"articleId": "1GGLG3PqZJC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GGLGSrI7OU",
"name": "ttg555501-09891797s1-supp1-3206579.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09891797s1-supp1-3206579.mp4",
"extension": "mp4",
"size": "166 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
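The wicking component of such a model can be sketched at its simplest as 1-D diffusion of free water along a single yarn, with a linear sorption sink that binds water into the fibers. The diffusivity, sorption rate, and discretization below are invented, and the sketch deliberately omits the coupled moisture-heat transfer and the fiber-yarn-fabric multi-scale geometry described in the abstract.

```python
# Minimal 1-D wicking-with-sorption sketch; constants are invented and the
# paper's coupled moisture-heat transfer is omitted. Illustration only.
import numpy as np

n, steps = 100, 2000
dx, dt = 1e-3, 1e-3   # 1 mm cells, 1 ms time steps (invented)
D = 1e-4              # effective wicking diffusivity (invented)
k_sorb = 0.05         # linear fiber sorption rate (invented)

w = np.zeros(n)         # free water content per yarn cell
w[0] = 1.0              # droplet pinned at one end of the yarn
absorbed = np.zeros(n)  # water bound inside the fibers

r = D * dt / dx**2    # explicit scheme is stable while r <= 0.5
assert r <= 0.5
for _ in range(steps):
    lap = np.zeros(n)
    lap[1:-1] = w[2:] - 2 * w[1:-1] + w[:-2]  # discrete Laplacian
    w += r * lap                              # diffusion (wicking)
    uptake = k_sorb * dt * w                  # fibers bind free water
    w -= uptake
    absorbed += uptake
    w[0] = 1.0                                # droplet as constant reservoir

print(f"wetted length ~ {np.argmax(w < 1e-3) * dx * 1e3:.1f} mm")
```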
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GDryH066Lm",
"doi": "10.1109/TVCG.2022.3206207",
"abstract": "The paper presents a 3D reconstruction algorithm from an undersampled circular light field (LF). With an ultra-dense angular sampling rate, every scene point captured by a circular LF corresponds to a smooth trajectory in the circular epipolar plane volume (CEPV). Thus per-pixel disparities can be calculated by retrieving the local gradients of the CEPV-trajectories. However, the continuous curve will be broken up into discrete segments in an undersampled circular LF, which leads to a noticeable deterioration of the 3D reconstruction accuracy. We observe that the coherent structure is still embedded in the discrete segments. With less noise and ambiguity, the scene points can be reconstructed using gradients from reliable epipolar plane image (EPI) regions. By analyzing the geometric characteristics of the coherent structure in the CEPV, both the trajectory itself and its gradients could be modeled as 3D predictable series. Thus a mask-guided CNN+LSTM network is proposed to learn the mapping from the CEPV with a lower angular sampling rate to the gradients under a higher angular sampling rate. To segment the reliable regions, the reliable-mask-based loss that assesses the difference between learned gradients and ground truth gradients is added to the loss function. We construct a synthetic circular LF dataset with ground truth for depth and foreground/background segmentation to train the network. Moreover, a real-scene circular LF dataset is collected for performance evaluation. Experimental results on both public and self-constructed datasets demonstrate the superiority of the proposed method over existing state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper presents a 3D reconstruction algorithm from an undersampled circular light field (LF). With an ultra-dense angular sampling rate, every scene point captured by a circular LF corresponds to a smooth trajectory in the circular epipolar plane volume (CEPV). Thus per-pixel disparities can be calculated by retrieving the local gradients of the CEPV-trajectories. However, the continuous curve will be broken up into discrete segments in an undersampled circular LF, which leads to a noticeable deterioration of the 3D reconstruction accuracy. We observe that the coherent structure is still embedded in the discrete segments. With less noise and ambiguity, the scene points can be reconstructed using gradients from reliable epipolar plane image (EPI) regions. By analyzing the geometric characteristics of the coherent structure in the CEPV, both the trajectory itself and its gradients could be modeled as 3D predictable series. Thus a mask-guided CNN+LSTM network is proposed to learn the mapping from the CEPV with a lower angular sampling rate to the gradients under a higher angular sampling rate. To segment the reliable regions, the reliable-mask-based loss that assesses the difference between learned gradients and ground truth gradients is added to the loss function. We construct a synthetic circular LF dataset with ground truth for depth and foreground/background segmentation to train the network. Moreover, a real-scene circular LF dataset is collected for performance evaluation. Experimental results on both public and self-constructed datasets demonstrate the superiority of the proposed method over existing state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper presents a 3D reconstruction algorithm from an undersampled circular light field (LF). With an ultra-dense angular sampling rate, every scene point captured by a circular LF corresponds to a smooth trajectory in the circular epipolar plane volume (CEPV). Thus per-pixel disparities can be calculated by retrieving the local gradients of the CEPV-trajectories. However, the continuous curve will be broken up into discrete segments in an undersampled circular LF, which leads to a noticeable deterioration of the 3D reconstruction accuracy. We observe that the coherent structure is still embedded in the discrete segments. With less noise and ambiguity, the scene points can be reconstructed using gradients from reliable epipolar plane image (EPI) regions. By analyzing the geometric characteristics of the coherent structure in the CEPV, both the trajectory itself and its gradients could be modeled as 3D predictable series. Thus a mask-guided CNN+LSTM network is proposed to learn the mapping from the CEPV with a lower angular sampling rate to the gradients under a higher angular sampling rate. To segment the reliable regions, the reliable-mask-based loss that assesses the difference between learned gradients and ground truth gradients is added to the loss function. We construct a synthetic circular LF dataset with ground truth for depth and foreground/background segmentation to train the network. Moreover, a real-scene circular LF dataset is collected for performance evaluation. Experimental results on both public and self-constructed datasets demonstrate the superiority of the proposed method over existing state-of-the-art methods.",
"title": "Learning Reliable Gradients From Undersampled Circular Light Field for 3D Reconstruction",
"normalizedTitle": "Learning Reliable Gradients From Undersampled Circular Light Field for 3D Reconstruction",
"fno": "09889219",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Trajectory",
"Light Fields",
"Reliability",
"Cameras",
"Image Reconstruction",
"Estimation",
"3 D Reconstruction",
"CNN LSTM",
"Circular Epipolar Plane Volume CEPV",
"Circular Light Field"
],
"authors": [
{
"givenName": "Zhengxi",
"surname": "Song",
"fullName": "Zhengxi Song",
"affiliation": "School of Computer Science, Northwestern Polytechnical University, Xi'an, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xue",
"surname": "Wang",
"fullName": "Xue Wang",
"affiliation": "School of Computer Science, Northwestern Polytechnical University, Xi'an, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao",
"surname": "Zhu",
"fullName": "Hao Zhu",
"affiliation": "School of Electronic Science and Engineering, Nanjing University, Nanjing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoqing",
"surname": "Zhou",
"fullName": "Guoqing Zhou",
"affiliation": "School of Computer Science, Northwestern Polytechnical University, Xi'an, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qing",
"surname": "Wang",
"fullName": "Qing Wang",
"affiliation": "School of Computer Science, Northwestern Polytechnical University, Xi'an, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457g873",
"title": "Snapshot Hyperspectral Light Field Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g873/12OmNqNG3iJ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h327",
"title": "Robust Hough Transform Based 3D Reconstruction from Circular Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h327/17D45VtKiyt",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09716806",
"title": "Disentangling Light Fields for Super-Resolution and Disparity Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09716806/1B5WzcrxgIM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c471",
"title": "SeLFVi: Self-supervised Light-Field Video Reconstruction from Stereo Video",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c471/1BmEZ8DjBug",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09798876",
"title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09798876/1Eho8QXQucg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaice/2021/2186/0/218600a610",
"title": "Panoramic Light Field - Oriented Image Stitching Method with Semantics and Projection",
"doi": null,
"abstractUrl": "/proceedings-article/icaice/2021/218600a610/1Et4s9AdA76",
"parentPublication": {
"id": "proceedings/icaice/2021/2186/0",
"title": "2021 2nd International Conference on Artificial Intelligence and Computer Engineering (ICAICE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956107",
"title": "A Deep Retinex Framework for Light Field Restoration under Low-light Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956107/1IHqjKP3U8o",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a229",
"title": "I See-Through You: A Framework for Removing Foreground Occlusion in Both Sparse and Dense Light Field Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a229/1KxUoKycxmE",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102847",
"title": "Accurate 3D Reconstruction from Circular Light Field Using CNN-LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102847/1kwrbNTmyn6",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09448470",
"title": "Deep Spatial-Angular Regularization for Light Field Imaging, Denoising, and Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09448470/1ugE5vtunqo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09887905",
"articleId": "1GBRnRoYZIA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09891797",
"articleId": "1GF6PmosQr6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
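The abstract above describes a mask-guided CNN+LSTM that maps an undersampled circular epipolar plane volume (CEPV) to trajectory gradients, supervised only in reliable EPI regions. Below is a minimal sketch of that idea, assuming PyTorch; the architecture, layer sizes, and the masked loss are illustrative placeholders, not the authors' implementation.

```python
# Minimal sketch (not the paper's code) of a CNN+LSTM that maps a stack of
# circular EPI slices to per-pixel gradient estimates plus a reliability mask.
import torch
import torch.nn as nn

class CEPVGradientNet(nn.Module):
    def __init__(self, hidden=64):
        super().__init__()
        # Per-frame CNN encoder: one EPI slice -> feature map (sizes are guesses)
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, hidden, 3, padding=1), nn.ReLU(),
        )
        # LSTM over the angular (frame) dimension, applied per pixel
        self.lstm = nn.LSTM(hidden, hidden, batch_first=True)
        # Two heads: gradient regression and a reliability mask
        self.grad_head = nn.Linear(hidden, 1)
        self.mask_head = nn.Linear(hidden, 1)

    def forward(self, cepv):                                  # cepv: (B, T, H, W)
        b, t, h, w = cepv.shape
        feats = self.encoder(cepv.reshape(b * t, 1, h, w))    # (B*T, C, H, W)
        c = feats.shape[1]
        # Rearrange so each pixel contributes a length-T sequence to the LSTM
        seq = feats.reshape(b, t, c, h * w).permute(0, 3, 1, 2)  # (B, HW, T, C)
        seq = seq.reshape(b * h * w, t, c)
        out, _ = self.lstm(seq)                               # (B*HW, T, hidden)
        last = out[:, -1]                                     # final angular state
        grads = self.grad_head(last).reshape(b, h, w)
        mask = torch.sigmoid(self.mask_head(last)).reshape(b, h, w)
        return grads, mask

# Simplified reading of the "reliable-mask-based" loss: penalize gradient
# error only where the mask marks the EPI region as reliable. A real system
# would also supervise the mask itself against ground-truth gradient error.
def masked_grad_loss(pred, mask, target):
    return ((pred - target) ** 2 * mask).mean()
```

Note that running the LSTM once per pixel (B*H*W sequences) is only practical at the small illustrative resolutions of a sketch like this.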
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GBRnHyZ1bW",
"doi": "10.1109/TVCG.2022.3205614",
"abstract": "A high dynamic range (HDR) image is commonly used to reveal stereo illumination, which is crucial for generating high-quality realistic rendering effects. Compared to the high-cost HDR imaging technique, low dynamic range (LDR) imaging provides a low-cost alternative and is preferable for interactive graphics applications. However, the limited LDR pixel bit depth significantly bothers accurate illumination estimation using LDR images. The conflict between the realism and promptness of illumination estimation for realistic rendering is yet to be resolved. In this paper, an efficient method that accurately infers illuminations of real-world scenes using LDR panoramic images is proposed. It estimates multiple lighting parameters, including locations, types and intensities of light sources. In our approach, a new algorithm that extracts illuminant characteristics during the exposure attenuation process is developed to locate light sources and outline their boundaries. To better predict realistic illuminations, a new deep learning model is designed to efficiently parse complex LDR panoramas and classify detected light sources. Finally, realistic illumination intensities are calculated by recovering the inverse camera response function and extending the dynamic range of pixel values based on previously estimated parameters of light sources. The reconstructed radiance map can be used to compute high-quality image-based lighting of virtual models. Experimental results demonstrate that the proposed method is capable of efficiently and accurately computing comprehensive illuminations using LDR images. Our method can be used to produce better realistic rendering results than existing approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A high dynamic range (HDR) image is commonly used to reveal stereo illumination, which is crucial for generating high-quality realistic rendering effects. Compared to the high-cost HDR imaging technique, low dynamic range (LDR) imaging provides a low-cost alternative and is preferable for interactive graphics applications. However, the limited LDR pixel bit depth significantly bothers accurate illumination estimation using LDR images. The conflict between the realism and promptness of illumination estimation for realistic rendering is yet to be resolved. In this paper, an efficient method that accurately infers illuminations of real-world scenes using LDR panoramic images is proposed. It estimates multiple lighting parameters, including locations, types and intensities of light sources. In our approach, a new algorithm that extracts illuminant characteristics during the exposure attenuation process is developed to locate light sources and outline their boundaries. To better predict realistic illuminations, a new deep learning model is designed to efficiently parse complex LDR panoramas and classify detected light sources. Finally, realistic illumination intensities are calculated by recovering the inverse camera response function and extending the dynamic range of pixel values based on previously estimated parameters of light sources. The reconstructed radiance map can be used to compute high-quality image-based lighting of virtual models. Experimental results demonstrate that the proposed method is capable of efficiently and accurately computing comprehensive illuminations using LDR images. Our method can be used to produce better realistic rendering results than existing approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A high dynamic range (HDR) image is commonly used to reveal stereo illumination, which is crucial for generating high-quality realistic rendering effects. Compared to the high-cost HDR imaging technique, low dynamic range (LDR) imaging provides a low-cost alternative and is preferable for interactive graphics applications. However, the limited LDR pixel bit depth significantly bothers accurate illumination estimation using LDR images. The conflict between the realism and promptness of illumination estimation for realistic rendering is yet to be resolved. In this paper, an efficient method that accurately infers illuminations of real-world scenes using LDR panoramic images is proposed. It estimates multiple lighting parameters, including locations, types and intensities of light sources. In our approach, a new algorithm that extracts illuminant characteristics during the exposure attenuation process is developed to locate light sources and outline their boundaries. To better predict realistic illuminations, a new deep learning model is designed to efficiently parse complex LDR panoramas and classify detected light sources. Finally, realistic illumination intensities are calculated by recovering the inverse camera response function and extending the dynamic range of pixel values based on previously estimated parameters of light sources. The reconstructed radiance map can be used to compute high-quality image-based lighting of virtual models. Experimental results demonstrate that the proposed method is capable of efficiently and accurately computing comprehensive illuminations using LDR images. Our method can be used to produce better realistic rendering results than existing approaches.",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"normalizedTitle": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"fno": "09887904",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Light Sources",
"Rendering Computer Graphics",
"Estimation",
"Cameras",
"Dynamic Range",
"Attenuation",
"Illumination Estimation",
"LDR Panoramic Image",
"Image Based Lighting",
"Realistic Rendering"
],
"authors": [
{
"givenName": "Haojie",
"surname": "Cheng",
"fullName": "Haojie Cheng",
"affiliation": "University of Science and Technology of China, Hefei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunxiao",
"surname": "Xu",
"fullName": "Chunxiao Xu",
"affiliation": "University of Science and Technology of China, Hefei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiajun",
"surname": "Wang",
"fullName": "Jiajun Wang",
"affiliation": "University of Science and Technology of China, Hefei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhenxin",
"surname": "Chen",
"fullName": "Zhenxin Chen",
"affiliation": "University of Science and Technology of China, Hefei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lingxiao",
"surname": "Zhao",
"fullName": "Lingxiao Zhao",
"affiliation": "University of Science and Technology of China, Hefei, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460513",
"title": "Illumination normalization of face images with cast shadows",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460513/12OmNBQ2VPw",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a056",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2016/9036/0/9036a292",
"title": "An LED-Based Tunable Illumination for Diverse Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a292/12OmNwDSdHu",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a913",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836506",
"title": "Reflectance and Illumination Estimation for Realistic Augmentations of Real Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836506/12OmNx5GTXK",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a192",
"title": "[POSTER] Illumination Estimation Using Cast Shadows for Realistic Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a192/12OmNxX3uLh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460098",
"title": "Illumination estimation from shadow and incomplete object shape captured by an RGB-D camera",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460098/12OmNzXWZK0",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528300",
"title": "Descattering of transmissive observation using Parallel High-Frequency Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528300/12OmNzmclka",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08511066",
"title": "Automatic Spatially Varying Illumination Recovery of Indoor Scenes Based on a Single RGB-D Image",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08511066/14H4WOKjoti",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/09008252",
"title": "A Dataset of Multi-Illumination Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/09008252/1hVl7roiUlW",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09881908",
"articleId": "1Gv909WpCG4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09887905",
"articleId": "1GBRnRoYZIA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GIqrXUJB3W",
"name": "ttg555501-09887904s1-supp2-3205614.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09887904s1-supp2-3205614.pdf",
"extension": "pdf",
"size": "28.9 MB",
"__typename": "WebExtraType"
},
{
"id": "1GIqtqys4fe",
"name": "ttg555501-09887904s1-supp1-3205614.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09887904s1-supp1-3205614.mp4",
"extension": "mp4",
"size": "141 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
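The final step described in the abstract above recovers radiance by inverting the camera response function (CRF) and extending the dynamic range at detected light sources. A minimal numpy sketch of that step follows, assuming a simple gamma CRF and a fixed, hypothetical intensity scale; the paper estimates both, so these are placeholders only.

```python
# Minimal sketch (assumption-laden, not the paper's implementation): invert a
# camera response function to map LDR pixels back to relative linear radiance,
# then boost saturated light-source pixels beyond the clipped LDR range.
import numpy as np

def inverse_crf(ldr, gamma=2.2):
    """Assume a simple gamma CRF: pixel = radiance**(1/gamma).
    Inverting gives relative linear radiance in [0, 1]."""
    ldr = np.clip(ldr.astype(np.float64) / 255.0, 0.0, 1.0)
    return ldr ** gamma

def expand_dynamic_range(ldr, light_mask, intensity_scale=50.0):
    """ldr: (H, W, 3) uint8 panorama; light_mask: (H, W) bool map of detected
    light sources; intensity_scale: hypothetical ratio between the true
    source radiance and the clipping point."""
    radiance = inverse_crf(ldr)
    radiance[light_mask] *= intensity_scale   # un-clip the light sources
    return radiance                            # HDR-like radiance map

# Usage with synthetic data:
pano = np.random.randint(0, 256, (64, 128, 3), dtype=np.uint8)
mask = pano.mean(axis=2) > 250                 # near-saturated pixels
hdr = expand_dynamic_range(pano, mask)
```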
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GBRnRoYZIA",
"doi": "10.1109/TVCG.2022.3205769",
"abstract": "Visual stories are an effective and powerful tool to convey specific information to a diverse public. Scrollytelling is a recent visual storytelling technique extensively used on the web, where content appears or changes as users scroll up or down a page. By employing the familiar gesture of scrolling as its primary interaction mechanism, it provides users with a sense of control, exploration and discoverability while still offering a simple and intuitive interface. In this paper, we present a novel approach for authoring, editing, and presenting data-driven scientific narratives using scrollytelling. Our method flexibly integrates common sources such as images, text, and video, but also supports more specialized visualization techniques such as interactive maps as well as scalar field and mesh data visualizations. We show that scrolling navigation can be used to traverse dynamic narratives and demonstrate how it can be combined with interactive parameter exploration. The resulting system consists of an extensible web-based authoring tool capable of exporting stand-alone stories that can be hosted on any web server. We demonstrate the power and utility of our approach with case studies from several diverse scientific fields and with a user study including 12 participants of diverse professional backgrounds. Furthermore, an expert in creating interactive articles assessed the usefulness of our approach and the quality of the created stories.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual stories are an effective and powerful tool to convey specific information to a diverse public. Scrollytelling is a recent visual storytelling technique extensively used on the web, where content appears or changes as users scroll up or down a page. By employing the familiar gesture of scrolling as its primary interaction mechanism, it provides users with a sense of control, exploration and discoverability while still offering a simple and intuitive interface. In this paper, we present a novel approach for authoring, editing, and presenting data-driven scientific narratives using scrollytelling. Our method flexibly integrates common sources such as images, text, and video, but also supports more specialized visualization techniques such as interactive maps as well as scalar field and mesh data visualizations. We show that scrolling navigation can be used to traverse dynamic narratives and demonstrate how it can be combined with interactive parameter exploration. The resulting system consists of an extensible web-based authoring tool capable of exporting stand-alone stories that can be hosted on any web server. We demonstrate the power and utility of our approach with case studies from several diverse scientific fields and with a user study including 12 participants of diverse professional backgrounds. Furthermore, an expert in creating interactive articles assessed the usefulness of our approach and the quality of the created stories.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual stories are an effective and powerful tool to convey specific information to a diverse public. Scrollytelling is a recent visual storytelling technique extensively used on the web, where content appears or changes as users scroll up or down a page. By employing the familiar gesture of scrolling as its primary interaction mechanism, it provides users with a sense of control, exploration and discoverability while still offering a simple and intuitive interface. In this paper, we present a novel approach for authoring, editing, and presenting data-driven scientific narratives using scrollytelling. Our method flexibly integrates common sources such as images, text, and video, but also supports more specialized visualization techniques such as interactive maps as well as scalar field and mesh data visualizations. We show that scrolling navigation can be used to traverse dynamic narratives and demonstrate how it can be combined with interactive parameter exploration. The resulting system consists of an extensible web-based authoring tool capable of exporting stand-alone stories that can be hosted on any web server. We demonstrate the power and utility of our approach with case studies from several diverse scientific fields and with a user study including 12 participants of diverse professional backgrounds. Furthermore, an expert in creating interactive articles assessed the usefulness of our approach and the quality of the created stories.",
"title": "ScrollyVis: Interactive Visual Authoring of Guided Dynamic Narratives for Scientific Scrollytelling",
"normalizedTitle": "ScrollyVis: Interactive Visual Authoring of Guided Dynamic Narratives for Scientific Scrollytelling",
"fno": "09887905",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Media",
"Motion Pictures",
"Authoring Systems",
"Writing",
"Web Servers"
],
"authors": [
{
"givenName": "Eric",
"surname": "Mörth",
"fullName": "Eric Mörth",
"affiliation": "Dept. of Informatics, University of Bergen, Norway",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefan",
"surname": "Bruckner",
"fullName": "Stefan Bruckner",
"affiliation": "Dept. of Informatics, University of Bergen, Norway",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Noeska N.",
"surname": "Smit",
"fullName": "Noeska N. Smit",
"affiliation": "Dept. of Informatics, University of Bergen, Norway",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/c5/2008/3115/0/3115a133",
"title": "New Metaphors for Multimedia Authoring Environments",
"doi": null,
"abstractUrl": "/proceedings-article/c5/2008/3115a133/12OmNClQ0Be",
"parentPublication": {
"id": "proceedings/c5/2008/3115/0",
"title": "2008 6th International Conference on Creating, Connecting and Collaborating through Computing (C5 '08)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccit/2009/3896/0/3896a034",
"title": "A Robot Motion Authoring Using Finger-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2009/3896a034/12OmNvpew9N",
"parentPublication": {
"id": "proceedings/iccit/2009/3896/0",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120478",
"title": "Preserving Narratives in Electronic Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120478/12OmNvqEvIv",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ngmast/2009/3786/0/3786a153",
"title": "Authoring of Platform Independent Interactive Mobile Broadcasting Services",
"doi": null,
"abstractUrl": "/proceedings-article/ngmast/2009/3786a153/12OmNwpoFMB",
"parentPublication": {
"id": "proceedings/ngmast/2009/3786/0",
"title": "Next Generation Mobile Applications, Services and Technologies, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1996/7508/0/75080276",
"title": "Interactive Authoring of Multimedia Documents",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1996/75080276/12OmNy3iFh5",
"parentPublication": {
"id": "proceedings/vl/1996/7508/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935436",
"title": "nARratives of augmented worlds",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935436/12OmNzw8j8Y",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2017/03/07439785",
"title": "A Survey on Story Generation Techniques for Authoring Computational Narratives",
"doi": null,
"abstractUrl": "/journal/ci/2017/03/07439785/13rRUy2YM1h",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017584",
"title": "Visualizing Nonlinear Narratives with Story Curves",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017584/13rRUyueghe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2019/0810/0/08818925",
"title": "A Framework for Authoring Logically Ordered Visual Data Stories",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2019/08818925/1dsfTRHa1ZS",
"parentPublication": {
"id": "proceedings/vl-hcc/2019/0810/0",
"title": "2019 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09547737",
"title": "ChartStory: Automated Partitioning, Layout, and Captioning of Charts into Comic-Style Narratives",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09547737/1x9TL0bvSlq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09887904",
"articleId": "1GBRnHyZ1bW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09889219",
"articleId": "1GDryH066Lm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GGLHAmqvtK",
"name": "ttg555501-09887905s1-tvcg-3205769-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09887905s1-tvcg-3205769-mm.zip",
"extension": "zip",
"size": "207 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
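The scrollytelling mechanic described in the abstract above (content activating as it scrolls into view, exported as a stand-alone page) can be illustrated with a small exporter that writes a self-contained HTML story. This is a generic sketch using the browser's IntersectionObserver API, not the ScrollyVis authoring tool or its export format.

```python
# Minimal sketch: emit a stand-alone HTML page whose sections fade in as
# they scroll into view (illustrative only; not the ScrollyVis exporter).
STEP = '<section class="step" data-idx="{i}"><h2>{title}</h2><p>{body}</p></section>'

PAGE = """<!doctype html>
<html><head><style>
  .step {{ min-height: 100vh; opacity: 0.25; transition: opacity 0.4s; }}
  .step.active {{ opacity: 1; }}
</style></head><body>
{steps}
<script>
  const obs = new IntersectionObserver(entries => {{
    for (const e of entries) e.target.classList.toggle('active', e.isIntersecting);
  }}, {{ threshold: 0.5 }});
  document.querySelectorAll('.step').forEach(s => obs.observe(s));
</script></body></html>"""

story = [("Intro", "Scroll to begin."),
         ("Data", "A chart could fade in here."),
         ("Conclusion", "The story ends.")]
steps = "\n".join(STEP.format(i=i, title=t, body=b) for i, (t, b) in enumerate(story))
with open("story.html", "w") as f:
    f.write(PAGE.format(steps=steps))
```

The resulting file can be hosted on any web server, which mirrors the stand-alone-export design choice the abstract highlights.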
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Gv909WpCG4",
"doi": "10.1109/TVCG.2022.3205181",
"abstract": "There has been an increasing focus on haptic interfaces for virtual reality (VR), to support a high-quality touch experience. However, it is still challenging to haptically simulate the real-world walking experience in different fluid mediums. To tackle this problem, we present <italic>PropelWalker</italic>, a pair of calf-worn haptic devices for simulating the buoyancy and the resistant force when the human's lower limbs are interacting with different fluids and materials in VR. By using four ducted fans, two installed on each calf, the system can control the strength and the direction of the airflow in real time to provide different levels of force. Our technical evaluation shows that <italic>PropelWalker</italic> can generate vertical forces up to 27 N in two directions (i.e., upward and downward) within 0.85 seconds. Furthermore, the system can stably maintain the generated force with minor turbulence. We further conducted three user-perception studies to understand the capability of <italic>PropelWalker</italic> to generate distinguishable force stimuli. Firstly, we conducted the just-noticeable-difference (JND) experiments to investigate the threshold of the human perception of on-leg air-flow force feedback. Our second perception study showed that users could distinguish four <italic>PropelWalker</italic>-generated force levels for simulating different walking mediums (i.e., dry ground, water, mud, and sand), with an average accuracy of 94.2%. Lastly, our VR user study showed that <italic>PropelWalker</italic> could significantly improve the users' sense of presence in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There has been an increasing focus on haptic interfaces for virtual reality (VR), to support a high-quality touch experience. However, it is still challenging to haptically simulate the real-world walking experience in different fluid mediums. To tackle this problem, we present <italic>PropelWalker</italic>, a pair of calf-worn haptic devices for simulating the buoyancy and the resistant force when the human's lower limbs are interacting with different fluids and materials in VR. By using four ducted fans, two installed on each calf, the system can control the strength and the direction of the airflow in real time to provide different levels of force. Our technical evaluation shows that <italic>PropelWalker</italic> can generate vertical forces up to 27 N in two directions (i.e., upward and downward) within 0.85 seconds. Furthermore, the system can stably maintain the generated force with minor turbulence. We further conducted three user-perception studies to understand the capability of <italic>PropelWalker</italic> to generate distinguishable force stimuli. Firstly, we conducted the just-noticeable-difference (JND) experiments to investigate the threshold of the human perception of on-leg air-flow force feedback. Our second perception study showed that users could distinguish four <italic>PropelWalker</italic>-generated force levels for simulating different walking mediums (i.e., dry ground, water, mud, and sand), with an average accuracy of 94.2%. Lastly, our VR user study showed that <italic>PropelWalker</italic> could significantly improve the users' sense of presence in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There has been an increasing focus on haptic interfaces for virtual reality (VR), to support a high-quality touch experience. However, it is still challenging to haptically simulate the real-world walking experience in different fluid mediums. To tackle this problem, we present PropelWalker, a pair of calf-worn haptic devices for simulating the buoyancy and the resistant force when the human's lower limbs are interacting with different fluids and materials in VR. By using four ducted fans, two installed on each calf, the system can control the strength and the direction of the airflow in real time to provide different levels of force. Our technical evaluation shows that PropelWalker can generate vertical forces up to 27 N in two directions (i.e., upward and downward) within 0.85 seconds. Furthermore, the system can stably maintain the generated force with minor turbulence. We further conducted three user-perception studies to understand the capability of PropelWalker to generate distinguishable force stimuli. Firstly, we conducted the just-noticeable-difference (JND) experiments to investigate the threshold of the human perception of on-leg air-flow force feedback. Our second perception study showed that users could distinguish four PropelWalker-generated force levels for simulating different walking mediums (i.e., dry ground, water, mud, and sand), with an average accuracy of 94.2%. Lastly, our VR user study showed that PropelWalker could significantly improve the users' sense of presence in VR.",
"title": "PropelWalker: A Leg-Based Wearable System With Propeller-Based Force Feedback for Walking in Fluids in VR",
"normalizedTitle": "PropelWalker: A Leg-Based Wearable System With Propeller-Based Force Feedback for Walking in Fluids in VR",
"fno": "09881908",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Propulsion",
"Force",
"Force Feedback",
"Foot",
"Propellers",
"Fans",
"Fluid",
"Haptic",
"Propeller",
"Virtual Reality"
],
"authors": [
{
"givenName": "Pingchuan",
"surname": "Ke",
"fullName": "Pingchuan Ke",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shaoyu",
"surname": "Cai",
"fullName": "Shaoyu Cai",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haichen",
"surname": "Gao",
"fullName": "Haichen Gao",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kening",
"surname": "Zhu",
"fullName": "Kening Zhu",
"affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2008/2005/0/04479927",
"title": "Force Amplitude Perception in Six Orthogonal Directions",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479927/12OmNAndiu9",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icndc/2010/8382/0/05645365",
"title": "Force/Torque-based Compliance Control for Humanoid Robot to Compensate the Landing Impact Force",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2010/05645365/12OmNAoDibc",
"parentPublication": {
"id": "proceedings/icndc/2010/8382/0",
"title": "2010 First International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367672",
"title": "Using force plate, computer simulation and image alignment in jumping analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367672/12OmNCbU2ZO",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/7081/4/05514050",
"title": "On Hydrodynamics of Bionic Flapping Hydrofoil Propeller",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/05514050/12OmNCctf9z",
"parentPublication": {
"id": "proceedings/icic/2010/7081/4",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220320",
"title": "A walking prescription for statically-stable walkers based on walker/terrain interaction",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220320/12OmNvA1hhp",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icia/2006/0528/0/04097745",
"title": "Basic Research on Power Assist Walking Leg Using Force/Velocity Control Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/icia/2006/04097745/12OmNwCsdE7",
"parentPublication": {
"id": "proceedings/icia/2006/0528/0",
"title": "2006 International Conference on Information Acquisition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08267106",
"title": "Force Rendering and its Evaluation of a Friction-Based Walking Sensation Display for a Seated User",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08267106/13rRUwIF6dW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679400",
"title": "Force Arrow 2: A Novel Pseudo-Haptic Interface for Weight Perception in Lifting Virtual Objects",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679400/18XkloCZdW8",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10005087",
"title": "SubLinearForce: Fully Sublinear-Time Force Computation for Large Complex Graph Drawing",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10005087/1JC5yDf0E5q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797962",
"title": "Muscleblazer: Force-Feedback Suit for Immersive Experience",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797962/1cJ0Qo05MTm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09875213",
"articleId": "1Glcx4nqUEg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09887904",
"articleId": "1GBRnHyZ1bW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GIqqZey94Q",
"name": "ttg555501-09881908s1-supp1-3205181.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09881908s1-supp1-3205181.mp4",
"extension": "mp4",
"size": "71.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
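The JND experiments mentioned in the abstract above are typically run with an adaptive staircase. Below is a minimal sketch of a 1-up-2-down staircase (which converges near the 70.7% detection threshold) driven by a simulated participant; all parameters, including the simulated threshold, are hypothetical and not taken from the PropelWalker protocol.

```python
# Minimal sketch of a 1-up-2-down adaptive staircase for a force JND study
# (hypothetical parameters; the simulated observer stands in for a user).
import random

def staircase(start_delta=5.0, step=0.5, reversals_needed=8, true_jnd=1.5):
    delta, correct_streak, direction, reversals = start_delta, 0, -1, []
    while len(reversals) < reversals_needed:
        # Simulated trial: the difference is detected when the force
        # increment clearly exceeds the observer's (noisy) true JND.
        detected = delta > true_jnd * random.uniform(0.7, 1.3)
        if detected:
            correct_streak += 1
            if correct_streak == 2:            # two correct -> make it harder
                correct_streak = 0
                if direction == +1:
                    reversals.append(delta)    # direction flipped: a reversal
                direction = -1
                delta = max(step, delta - step)
        else:                                  # one wrong -> make it easier
            correct_streak = 0
            if direction == -1:
                reversals.append(delta)
            direction = +1
            delta += step
    # JND estimate: mean increment at the reversal points
    return sum(reversals) / len(reversals)

print(f"estimated JND ~ {staircase():.2f} N")
```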
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Glcx4nqUEg",
"doi": "10.1109/TVCG.2022.3201934",
"abstract": "A critical yet unresolved challenge in designing space-adaptive narratives for Augmented Reality (AR) is to provide consistently immersive user experiences anywhere, regardless of physical features specific to a space. For this, we present a comprehensive analysis on a series of user studies investigating how the size, density, and layout of real indoor spaces affect users playing Fragments, a space-adaptive AR detective game. Based on the studies, we assert that moderate levels of traversability and visual complexity afforded in counteracting combinations of size and complexity are beneficial for narrative experience. To confirm our argument, we combined the experimental data of the studies (n=112) to compare how five different spatial complexity conditions impact narrative experience when applied to contrasting room sizes. Results show that whereas factors of narrative experience are rated significantly higher in relatively simple settings for a small space, they are less affected by complexity in a large space. Ultimately, we establish guidelines on the design and placement of space-adaptive augmentations in location-independent AR narratives to compensate for the lack or excess of affordances in various real spaces and enhance user experiences therein.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A critical yet unresolved challenge in designing space-adaptive narratives for Augmented Reality (AR) is to provide consistently immersive user experiences anywhere, regardless of physical features specific to a space. For this, we present a comprehensive analysis on a series of user studies investigating how the size, density, and layout of real indoor spaces affect users playing Fragments, a space-adaptive AR detective game. Based on the studies, we assert that moderate levels of traversability and visual complexity afforded in counteracting combinations of size and complexity are beneficial for narrative experience. To confirm our argument, we combined the experimental data of the studies (n=112) to compare how five different spatial complexity conditions impact narrative experience when applied to contrasting room sizes. Results show that whereas factors of narrative experience are rated significantly higher in relatively simple settings for a small space, they are less affected by complexity in a large space. Ultimately, we establish guidelines on the design and placement of space-adaptive augmentations in location-independent AR narratives to compensate for the lack or excess of affordances in various real spaces and enhance user experiences therein.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A critical yet unresolved challenge in designing space-adaptive narratives for Augmented Reality (AR) is to provide consistently immersive user experiences anywhere, regardless of physical features specific to a space. For this, we present a comprehensive analysis on a series of user studies investigating how the size, density, and layout of real indoor spaces affect users playing Fragments, a space-adaptive AR detective game. Based on the studies, we assert that moderate levels of traversability and visual complexity afforded in counteracting combinations of size and complexity are beneficial for narrative experience. To confirm our argument, we combined the experimental data of the studies (n=112) to compare how five different spatial complexity conditions impact narrative experience when applied to contrasting room sizes. Results show that whereas factors of narrative experience are rated significantly higher in relatively simple settings for a small space, they are less affected by complexity in a large space. Ultimately, we establish guidelines on the design and placement of space-adaptive augmentations in location-independent AR narratives to compensate for the lack or excess of affordances in various real spaces and enhance user experiences therein.",
"title": "The Effects of Spatial Complexity on Narrative Experience in Space-Adaptive AR Storytelling",
"normalizedTitle": "The Effects of Spatial Complexity on Narrative Experience in Space-Adaptive AR Storytelling",
"fno": "09875213",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Complexity Theory",
"Visualization",
"Affordances",
"Games",
"Layout",
"Usability",
"Task Analysis",
"Augmented Reality",
"Head Mounted Displays",
"Narrative Experience",
"Space Adaptivity",
"Spatial Affordance",
"Spatial Mapping"
],
"authors": [
{
"givenName": "Jae-eun",
"surname": "Shin",
"fullName": "Jae-eun Shin",
"affiliation": "KAIST ARRC, Daejeon, Republic of Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boram",
"surname": "Yoon",
"fullName": "Boram Yoon",
"affiliation": "KAIST UVR Lab, Daejeon, Republic of Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dooyoung",
"surname": "Kim",
"fullName": "Dooyoung Kim",
"affiliation": "KAIST UVR Lab, Daejeon, Republic of Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Woontack",
"surname": "Woo",
"fullName": "Woontack Woo",
"affiliation": "KAIST UVR Lab, Daejeon, Republic of Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2015/7334/0/7334a233",
"title": "What Happened to Non-linear Narrative? A Pedagogical Reflection",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a233/12OmNBlofQm",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2016/2722/0/07590383",
"title": "Yoway: Coupling Narrative Structure with Physical Exploration in Multi-Linear Locative Narratives",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2016/07590383/12OmNqN6R42",
"parentPublication": {
"id": "proceedings/vs-games/2016/2722/0",
"title": "2016 8th International Conference on Games and Virtual Worlds for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a214",
"title": "Designing Narrative Interface with a Function of Narrative Generation",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a214/12OmNsdo6u8",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935436",
"title": "nARratives of augmented worlds",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935436/12OmNzw8j8Y",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/02/06657816",
"title": "Analysis of ReGEN as a Graph-Rewriting System for Quest Generation",
"doi": null,
"abstractUrl": "/journal/ci/2014/02/06657816/13rRUyuNszA",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/02/06832695",
"title": "Guest Editorial: Computational Narrative and Games",
"doi": null,
"abstractUrl": "/journal/ci/2014/02/06832695/13rRUzpQPNY",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2018/9605/0/960500a187",
"title": "Personality and Preference Modeling for Adaptive Storytelling",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2018/960500a187/17D45VTRoxd",
"parentPublication": {
"id": "proceedings/sbgames/2018/9605/0",
"title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09929872",
"title": "Herstory: an AR storytelling application presenting women's heroic lives in public space",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09929872/1HYuTEdgFOM",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a289",
"title": "Using Space Syntax to Enable Walkable AR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a289/1pBMiBe8hlC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vizsec/2021/2085/0/208500a001",
"title": "Automatic Narrative Summarization for Visualizing Cyber Security Logs and Incident Reports",
"doi": null,
"abstractUrl": "/proceedings-article/vizsec/2021/208500a001/1z93N3tNIaY",
"parentPublication": {
"id": "proceedings/vizsec/2021/2085/0",
"title": "2021 IEEE Symposium on Visualization for Cyber Security (VizSec)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09870679",
"articleId": "1GgcTinkSbK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09881908",
"articleId": "1Gv909WpCG4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GeVHT5oVtC",
"doi": "10.1109/TVCG.2022.3202240",
"abstract": "Accurately estimating the human inner-body under clothing is very important for body measurement, virtual try-on and VR/AR applications. In this paper, we propose the first method to allow everyone to easily reconstruct their own 3D inner-body under daily clothing from a self-captured video with the mean reconstruction error of 0.73 cm within 15 s. This avoids privacy concerns arising from nudity or minimal clothing. Specifically, we propose a novel two-stage framework with a Semantic-guided Undressing Network (SUNet) and an Intra-Inter Transformer Network (IITNet). SUNet learns semantically related body features to alleviate the complexity and uncertainty of directly estimating 3D inner-bodies under clothing. IITNet reconstructs the 3D inner-body model by making full use of intra-frame and inter-frame information, which addresses the misalignment of inconsistent poses in different frames. Experimental results on both public datasets and our collected dataset demonstrate the effectiveness of the proposed method. The code and dataset is available for research purposes at <uri>http://cic.tju.edu.cn/faculty/likun/projects/Inner-Body</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Accurately estimating the human inner-body under clothing is very important for body measurement, virtual try-on and VR/AR applications. In this paper, we propose the first method to allow everyone to easily reconstruct their own 3D inner-body under daily clothing from a self-captured video with the mean reconstruction error of 0.73 cm within 15 s. This avoids privacy concerns arising from nudity or minimal clothing. Specifically, we propose a novel two-stage framework with a Semantic-guided Undressing Network (SUNet) and an Intra-Inter Transformer Network (IITNet). SUNet learns semantically related body features to alleviate the complexity and uncertainty of directly estimating 3D inner-bodies under clothing. IITNet reconstructs the 3D inner-body model by making full use of intra-frame and inter-frame information, which addresses the misalignment of inconsistent poses in different frames. Experimental results on both public datasets and our collected dataset demonstrate the effectiveness of the proposed method. The code and dataset is available for research purposes at <uri>http://cic.tju.edu.cn/faculty/likun/projects/Inner-Body</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Accurately estimating the human inner-body under clothing is very important for body measurement, virtual try-on and VR/AR applications. In this paper, we propose the first method to allow everyone to easily reconstruct their own 3D inner-body under daily clothing from a self-captured video with the mean reconstruction error of 0.73 cm within 15 s. This avoids privacy concerns arising from nudity or minimal clothing. Specifically, we propose a novel two-stage framework with a Semantic-guided Undressing Network (SUNet) and an Intra-Inter Transformer Network (IITNet). SUNet learns semantically related body features to alleviate the complexity and uncertainty of directly estimating 3D inner-bodies under clothing. IITNet reconstructs the 3D inner-body model by making full use of intra-frame and inter-frame information, which addresses the misalignment of inconsistent poses in different frames. Experimental results on both public datasets and our collected dataset demonstrate the effectiveness of the proposed method. The code and dataset is available for research purposes at http://cic.tju.edu.cn/faculty/likun/projects/Inner-Body.",
"title": "Learning to Infer Inner-Body Under Clothing From Monocular Video",
"normalizedTitle": "Learning to Infer Inner-Body Under Clothing From Monocular Video",
"fno": "09869633",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Shape",
"Clothing",
"Image Reconstruction",
"Cameras",
"Transformers",
"Solid Modeling",
"Inner Body",
"Reconstruction",
"Single RGB Camera",
"Transformer",
"Under Clothing"
],
"authors": [
{
"givenName": "Xiongzheng",
"surname": "Li",
"fullName": "Xiongzheng Li",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Huang",
"fullName": "Jing Huang",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinsong",
"surname": "Zhang",
"fullName": "Jinsong Zhang",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaokun",
"surname": "Sun",
"fullName": "Xiaokun Sun",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haibiao",
"surname": "Xuan",
"fullName": "Haibiao Xuan",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu-Kun",
"surname": "Lai",
"fullName": "Yu-Kun Lai",
"affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingdi",
"surname": "Xie",
"fullName": "Yingdi Xie",
"affiliation": "VRC Inc., Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jingyu",
"surname": "Yang",
"fullName": "Jingyu Yang",
"affiliation": "School of Electrical and Information Engineering, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kun",
"surname": "Li",
"fullName": "Kun Li",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2015/7082/0/07177402",
"title": "Seeing through the appearance: Body shape estimation using multi-view clothing images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177402/12OmNCd2rQk",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c193",
"title": "Clothing Change Aware Person Identification",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c193/17D45WHONnI",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a968",
"title": "Fashion Is Taking Shape: Understanding Clothing Preference Based on Body Shape From Online Sources",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a968/18j8H2oJL6U",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scset/2022/7876/0/787600a199",
"title": "Research and Prospect of 3D Virtual Display Technology of Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/scset/2022/787600a199/1ANLZahi1sQ",
"parentPublication": {
"id": "proceedings/scset/2022/7876/0",
"title": "2022 International Seminar on Computer Science and Engineering Technology (SCSET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a679",
"title": "Neural Point-based Shape Modeling of Humans in Challenging Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a679/1KYsvi8qLS0",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0510",
"title": "VTNFP: An Image-Based Virtual Try-On Network With Body and Clothing Feature Preservation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0510/1hVlSD4rLA4",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g468",
"title": "Learning to Dress 3D People in Generative Clothing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g468/1m3nwUHFD68",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a322",
"title": "MonoClothCap: Towards Temporally Coherent Clothing Capture from Monocular RGB Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a322/1qyxk1bcV5S",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a879",
"title": "PeeledHuman: Robust Shape Representation for Textured 3D Human Body Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a879/1qyxmAvhFD2",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09664343",
"title": "HUMBI: A Large Multiview Dataset of Human Body Expressions and Benchmark Challenge",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09664343/1zHDArFNvKU",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09866819",
"articleId": "1G7UilgWye4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09870173",
"articleId": "1GgcSqKQSM8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GhRXjsjCP6",
"name": "ttg555501-09869633s1-supp1-3202240.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09869633s1-supp1-3202240.mp4",
"extension": "mp4",
"size": "65.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GgcSqKQSM8",
"doi": "10.1109/TVCG.2022.3202503",
"abstract": "This paper addresses the challenge of human performance capture from sparse multi-view or monocular videos. Given a template mesh of the performer, previous methods capture the human motion by non-rigidly registering the template mesh to images with 2D silhouettes or dense photometric alignment. However, the detailed surface deformation cannot be recovered from the silhouettes, while the photometric alignment suffers from instability caused by appearance variation in the videos. To solve these problems, we propose NerfCap, a novel performance capture method based on the dynamic neural radiance field (NeRF) representation of the performer. Specifically, a canonical NeRF is initialized from the template geometry and registered to the video frames by optimizing the deformation field and the appearance model of the canonical NeRF. To capture both large body motion and detailed surface deformation, NerfCap combines linear blend skinning with embedded graph deformation. In contrast to the mesh-based methods that suffer from fixed topology and texture, NerfCap is able to flexibly capture complex geometry and appearance variation across the videos, and synthesize more photo-realistic images. In addition, NerfCap can be pre-trained end to end in a self-supervised manner by matching the synthesized videos with the input videos. Experimental results on various datasets show that NerfCap outperforms prior works in terms of both surface reconstruction accuracy and novel-view synthesis quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses the challenge of human performance capture from sparse multi-view or monocular videos. Given a template mesh of the performer, previous methods capture the human motion by non-rigidly registering the template mesh to images with 2D silhouettes or dense photometric alignment. However, the detailed surface deformation cannot be recovered from the silhouettes, while the photometric alignment suffers from instability caused by appearance variation in the videos. To solve these problems, we propose NerfCap, a novel performance capture method based on the dynamic neural radiance field (NeRF) representation of the performer. Specifically, a canonical NeRF is initialized from the template geometry and registered to the video frames by optimizing the deformation field and the appearance model of the canonical NeRF. To capture both large body motion and detailed surface deformation, NerfCap combines linear blend skinning with embedded graph deformation. In contrast to the mesh-based methods that suffer from fixed topology and texture, NerfCap is able to flexibly capture complex geometry and appearance variation across the videos, and synthesize more photo-realistic images. In addition, NerfCap can be pre-trained end to end in a self-supervised manner by matching the synthesized videos with the input videos. Experimental results on various datasets show that NerfCap outperforms prior works in terms of both surface reconstruction accuracy and novel-view synthesis quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses the challenge of human performance capture from sparse multi-view or monocular videos. Given a template mesh of the performer, previous methods capture the human motion by non-rigidly registering the template mesh to images with 2D silhouettes or dense photometric alignment. However, the detailed surface deformation cannot be recovered from the silhouettes, while the photometric alignment suffers from instability caused by appearance variation in the videos. To solve these problems, we propose NerfCap, a novel performance capture method based on the dynamic neural radiance field (NeRF) representation of the performer. Specifically, a canonical NeRF is initialized from the template geometry and registered to the video frames by optimizing the deformation field and the appearance model of the canonical NeRF. To capture both large body motion and detailed surface deformation, NerfCap combines linear blend skinning with embedded graph deformation. In contrast to the mesh-based methods that suffer from fixed topology and texture, NerfCap is able to flexibly capture complex geometry and appearance variation across the videos, and synthesize more photo-realistic images. In addition, NerfCap can be pre-trained end to end in a self-supervised manner by matching the synthesized videos with the input videos. Experimental results on various datasets show that NerfCap outperforms prior works in terms of both surface reconstruction accuracy and novel-view synthesis quality.",
"title": "NerfCap: Human Performance Capture With Dynamic Neural Radiance Fields",
"normalizedTitle": "NerfCap: Human Performance Capture With Dynamic Neural Radiance Fields",
"fno": "09870173",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Strain",
"Videos",
"Geometry",
"Three Dimensional Displays",
"Deformable Models",
"Clothing",
"Image Reconstruction",
"Dynamic Neural Radiance Fields",
"Human Deformation Fields",
"Human Performance Capture"
],
"authors": [
{
"givenName": "Kangkan",
"surname": "Wang",
"fullName": "Kangkan Wang",
"affiliation": "Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, P.R. China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sida",
"surname": "Peng",
"fullName": "Sida Peng",
"affiliation": "State Key Lab of CAD&CG, Zijingang Campus, Zhejiang University, Hangzhou, P.R. China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaowei",
"surname": "Zhou",
"fullName": "Xiaowei Zhou",
"affiliation": "State Key Lab of CAD&CG, Zijingang Campus, Zhejiang University, Hangzhou, P.R. China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Yang",
"fullName": "Jian Yang",
"affiliation": "Key Lab of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, P.R. China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guofeng",
"surname": "Zhang",
"fullName": "Guofeng Zhang",
"affiliation": "State Key Lab of CAD&CG, Zijingang Campus, Zhejiang University, Hangzhou, P.R. China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200f826",
"title": "Self-Calibrating Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f826/1BmEiCkWfU4",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f845",
"title": "Nerfies: Deformable Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f845/1BmL0KETWzm",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09888037",
"title": "MPS-NeRF: Generalizable 3D Human Rendering From Multiview Images",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09888037/1GBRkqcf7m8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8332",
"title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2851",
"title": "Deblur-NeRF: Neural Radiance Fields from Blurry Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2851/1H1kFc1BMLS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f428",
"title": "Point-NeRF: Point-based Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a606",
"title": "Cross-Spectral Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a606/1KYsqz6IyGI",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a795",
"title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a795/1KxVhi7yhR6",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300f499",
"title": "SimulCap : Single-View Human Performance Capture With Cloth Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300f499/1gyrcfyAKI0",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a322",
"title": "MonoClothCap: Towards Temporally Coherent Clothing Capture from Monocular RGB Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a322/1qyxk1bcV5S",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09869633",
"articleId": "1GeVHT5oVtC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09870679",
"articleId": "1GgcTinkSbK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Gqat3C16zS",
"name": "ttg555501-09870173s1-supp1-3202503.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09870173s1-supp1-3202503.mp4",
"extension": "mp4",
"size": "94.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GgcTinkSbK",
"doi": "10.1109/TVCG.2022.3201101",
"abstract": "Arguably the most representative application of artificial intelligence, autonomous driving systems usually rely on computer vision techniques to detect the situations of the external environment. Object detection underpins the ability of scene understanding in such systems. However, existing object detection algorithms often behave as a black box, so when a model fails, no information is available on <italic>When, Where</italic> and <italic>How</italic> the failure happened. In this paper, we propose a visual analytics approach to help model developers interpret the model failures. The system includes the <italic>micro-</italic> and <italic>macro-</italic>interpreting modules to address the interpretability problem of object detection in autonomous driving. The <italic>micro-</italic>interpreting module extracts and visualizes the features of a convolutional neural network (CNN) algorithm with density maps, while the <italic>macro-</italic>interpreting module provides spatial-temporal information of an autonomous driving vehicle and its environment. With the situation awareness of the spatial, temporal and neural network information, our system facilitates the understanding of the results of object detection algorithms, and helps the model developers better understand, tune and develop the models. We use real-world autonomous driving data to perform case studies by involving domain experts in computer vision and autonomous driving to evaluate our system. The results from our interviews with them show the effectiveness of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Arguably the most representative application of artificial intelligence, autonomous driving systems usually rely on computer vision techniques to detect the situations of the external environment. Object detection underpins the ability of scene understanding in such systems. However, existing object detection algorithms often behave as a black box, so when a model fails, no information is available on <italic>When, Where</italic> and <italic>How</italic> the failure happened. In this paper, we propose a visual analytics approach to help model developers interpret the model failures. The system includes the <italic>micro-</italic> and <italic>macro-</italic>interpreting modules to address the interpretability problem of object detection in autonomous driving. The <italic>micro-</italic>interpreting module extracts and visualizes the features of a convolutional neural network (CNN) algorithm with density maps, while the <italic>macro-</italic>interpreting module provides spatial-temporal information of an autonomous driving vehicle and its environment. With the situation awareness of the spatial, temporal and neural network information, our system facilitates the understanding of the results of object detection algorithms, and helps the model developers better understand, tune and develop the models. We use real-world autonomous driving data to perform case studies by involving domain experts in computer vision and autonomous driving to evaluate our system. The results from our interviews with them show the effectiveness of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Arguably the most representative application of artificial intelligence, autonomous driving systems usually rely on computer vision techniques to detect the situations of the external environment. Object detection underpins the ability of scene understanding in such systems. However, existing object detection algorithms often behave as a black box, so when a model fails, no information is available on When, Where and How the failure happened. In this paper, we propose a visual analytics approach to help model developers interpret the model failures. The system includes the micro- and macro-interpreting modules to address the interpretability problem of object detection in autonomous driving. The micro-interpreting module extracts and visualizes the features of a convolutional neural network (CNN) algorithm with density maps, while the macro-interpreting module provides spatial-temporal information of an autonomous driving vehicle and its environment. With the situation awareness of the spatial, temporal and neural network information, our system facilitates the understanding of the results of object detection algorithms, and helps the model developers better understand, tune and develop the models. We use real-world autonomous driving data to perform case studies by involving domain experts in computer vision and autonomous driving to evaluate our system. The results from our interviews with them show the effectiveness of our approach.",
"title": "When, Where and How does it fail? A Spatial-temporal Visual Analytics Approach for Interpretable Object Detection in Autonomous Driving",
"normalizedTitle": "When, Where and How does it fail? A Spatial-temporal Visual Analytics Approach for Interpretable Object Detection in Autonomous Driving",
"fno": "09870679",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Autonomous Vehicles",
"Analytical Models",
"Object Detection",
"Visual Analytics",
"Artificial Intelligence",
"Three Dimensional Displays",
"Data Visualization",
"Autonomous Driving",
"Spatial Temporal Visual Analytics",
"Interpretability"
],
"authors": [
{
"givenName": "Junhong",
"surname": "Wang",
"fullName": "Junhong Wang",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yun",
"surname": "Li",
"fullName": "Yun Li",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhaoyu",
"surname": "Zhou",
"fullName": "Zhaoyu Zhou",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chengshun",
"surname": "Wang",
"fullName": "Chengshun Wang",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yijie",
"surname": "Hou",
"fullName": "Yijie Hou",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Li",
"surname": "Zhang",
"fullName": "Li Zhang",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiangyang",
"surname": "Xue",
"fullName": "Xiangyang Xue",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Kamp",
"fullName": "Michael Kamp",
"affiliation": "Institute for AI in Medicine (IKIM), UK Essen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaolong",
"surname": "Zhang",
"fullName": "Xiaolong Zhang",
"affiliation": "Pennsylvania State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siming",
"surname": "Chen",
"fullName": "Siming Chen",
"affiliation": "School of Data Science, Fudan University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isorc/2018/5847/0/584701a107",
"title": "Cost-Effective Redundancy Approach for Fail-Operational Autonomous Driving System",
"doi": null,
"abstractUrl": "/proceedings-article/isorc/2018/584701a107/12OmNC8uRnN",
"parentPublication": {
"id": "proceedings/isorc/2018/5847/0",
"title": "2018 IEEE 21st International Symposium on Real-Time Distributed Computing (ISORC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904017",
"title": "Visual Concept Programming: A Visual Analytics Approach to Injecting Human Intelligence at Scale",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904017/1H0GlgwfKak",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2023/02/10042093",
"title": "Trustworthy Artificial Intelligence Requirements in the Autonomous Driving Domain",
"doi": null,
"abstractUrl": "/magazine/co/2023/02/10042093/1KEti7i59w4",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2019/03/08817317",
"title": "Multimedia for Autonomous Driving",
"doi": null,
"abstractUrl": "/magazine/mu/2019/03/08817317/1cPWP7sFJsI",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2019/4026/0/08929214",
"title": "Comfortable Driving by using Deep Inverse Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2019/08929214/1fJRQOpS23u",
"parentPublication": {
"id": "proceedings/ica/2019/4026/0",
"title": "2019 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2020/4199/0/09016591",
"title": "Fast and Reliable Offloading via Deep Reinforcement Learning for Mobile Edge Video Computing",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2020/09016591/1hQqW49dsmk",
"parentPublication": {
"id": "proceedings/icoin/2020/4199/0",
"title": "2020 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09233993",
"title": "<italic>VATLD</italic>: A <italic>V</italic>isual <italic>A</italic>nalytics System to Assess, Understand and Improve <italic>T</italic>raffic <italic>L</italic>ight <italic>D</italic>etection",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09233993/1o53W7V42CQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aitest/2021/3481/0/348100a081",
"title": "An Industrial Workbench for Test Scenario Identification for Autonomous Driving Software",
"doi": null,
"abstractUrl": "/proceedings-article/aitest/2021/348100a081/1xH9KOJDORa",
"parentPublication": {
"id": "proceedings/aitest/2021/3481/0",
"title": "2021 IEEE International Conference On Artificial Intelligence Testing (AITest)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552909",
"title": "<italic>Where Can We Help</italic>? A Visual Analytics Approach to Diagnosing and Improving Semantic Segmentation of Movable Objects",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552909/1xibW2zLd9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09597616",
"title": "Visual Evaluation for Autonomous Driving",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09597616/1yezimL3oTS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09870173",
"articleId": "1GgcSqKQSM8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09875213",
"articleId": "1Glcx4nqUEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GhRZ0BQwkU",
"name": "ttg555501-09870679s1-supp1-3201101.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09870679s1-supp1-3201101.mp4",
"extension": "mp4",
"size": "35.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1G6ALnVS50I",
"doi": "10.1109/TVCG.2022.3201120",
"abstract": "Recent augmented reality (AR) advancements have enabled the development of effective training systems, especially in the medical, rehabilitation, and industrial fields. However, it is unclear from the literature what the intrinsic value of AR to training is and how it differs across multiple application fields. In this work, we gathered and reviewed the prototypes and applications geared towards training the intended user's knowledge, skills, and abilities. Specifically, from IEEE Xplore plus other digital libraries, we collected 64 research papers present in high-impact publications about augmented reality training systems (ARTS). All 64 papers were then categorized according to the training method used, and each paper's evaluations were identified by validity. The summary of the results shows trends in the training methods and evaluations that incorporate ARTS in each field. The narrative synthesis illustrates the different implementations of AR for each of the training methods. In addition, examples of the different evaluation types of the current ARTS are described for each of the aforementioned training methods. We also investigated the different training strategies used by the prevailing ARTS. The insights gleaned from this review can suggest standards for designing ARTS regarding training strategy, and recommendations are provided for the implementation and evaluation of future ARTS.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent augmented reality (AR) advancements have enabled the development of effective training systems, especially in the medical, rehabilitation, and industrial fields. However, it is unclear from the literature what the intrinsic value of AR to training is and how it differs across multiple application fields. In this work, we gathered and reviewed the prototypes and applications geared towards training the intended user's knowledge, skills, and abilities. Specifically, from IEEE Xplore plus other digital libraries, we collected 64 research papers present in high-impact publications about augmented reality training systems (ARTS). All 64 papers were then categorized according to the training method used, and each paper's evaluations were identified by validity. The summary of the results shows trends in the training methods and evaluations that incorporate ARTS in each field. The narrative synthesis illustrates the different implementations of AR for each of the training methods. In addition, examples of the different evaluation types of the current ARTS are described for each of the aforementioned training methods. We also investigated the different training strategies used by the prevailing ARTS. The insights gleaned from this review can suggest standards for designing ARTS regarding training strategy, and recommendations are provided for the implementation and evaluation of future ARTS.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent augmented reality (AR) advancements have enabled the development of effective training systems, especially in the medical, rehabilitation, and industrial fields. However, it is unclear from the literature what the intrinsic value of AR to training is and how it differs across multiple application fields. In this work, we gathered and reviewed the prototypes and applications geared towards training the intended user's knowledge, skills, and abilities. Specifically, from IEEE Xplore plus other digital libraries, we collected 64 research papers present in high-impact publications about augmented reality training systems (ARTS). All 64 papers were then categorized according to the training method used, and each paper's evaluations were identified by validity. The summary of the results shows trends in the training methods and evaluations that incorporate ARTS in each field. The narrative synthesis illustrates the different implementations of AR for each of the training methods. In addition, examples of the different evaluation types of the current ARTS are described for each of the aforementioned training methods. We also investigated the different training strategies used by the prevailing ARTS. The insights gleaned from this review can suggest standards for designing ARTS regarding training strategy, and recommendations are provided for the implementation and evaluation of future ARTS.",
"title": "Systematic Review of Augmented Reality Training Systems",
"normalizedTitle": "Systematic Review of Augmented Reality Training Systems",
"fno": "09866555",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Training",
"Subspace Constraints",
"Task Analysis",
"Market Research",
"Augmented Reality",
"Systematics",
"Real Time Systems",
"Augmented Reality Training Systems",
"Evaluation",
"Narrative Synthesis",
"Systematic Review",
"Training Method"
],
"authors": [
{
"givenName": "Isidro M.",
"surname": "Butaslac",
"fullName": "Isidro M. Butaslac",
"affiliation": "Graduate School of Science and Technology, Nara Institute of Science and Technology, Ikoma, Nara, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuichiro",
"surname": "Fujimoto",
"fullName": "Yuichiro Fujimoto",
"affiliation": "Graduate School of Science and Technology, Nara Institute of Science and Technology, Ikoma, Nara, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Taishi",
"surname": "Sawabe",
"fullName": "Taishi Sawabe",
"affiliation": "Graduate School of Science and Technology, Nara Institute of Science and Technology, Ikoma, Nara, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masayuki",
"surname": "Kanbara",
"fullName": "Masayuki Kanbara",
"affiliation": "Graduate School of Science and Technology, Nara Institute of Science and Technology, Ikoma, Nara, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hirokazu",
"surname": "Kato",
"fullName": "Hirokazu Kato",
"affiliation": "Graduate School of Science and Technology, Nara Institute of Science and Technology, Ikoma, Nara, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-20",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836457",
"title": "A Systematic Review of Usability Studies in Augmented Reality between 2005 and 2014",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836457/12OmNvs4vsh",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2017/6664/0/08279706",
"title": "A medical training system using augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279706/12OmNwErpJY",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240249",
"title": "Augmented Reality for Urban Skills Training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240249/12OmNx4yvB1",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a227",
"title": "Augmented Reality and Serious Games: A Systematic Literature Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a227/12OmNyQGS43",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09676467",
"title": "Analytic Review of Using Augmented Reality for Situational Awareness",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09676467/1A3dpoxdxgQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a036",
"title": "Deceiving Audio Design in Augmented Environments : A Systematic Review of Audio Effects in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a036/1J7WbNX5pD2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a128",
"title": "Multi-vehicle Cooperative Military Training Simulation System Based on Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a128/1gysonjGAqA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2020/8041/0/09204125",
"title": "Augmented reality for children with Autism Spectrum Disorder - A systematic review",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2020/09204125/1nmi7vlqm9q",
"parentPublication": {
"id": "proceedings/iscv/2020/8041/0",
"title": "2020 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a199",
"title": "A Systematic Review of Rapid Prototyping Tools for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a199/1oZBDgufHnq",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a438",
"title": "Evaluating Mixed and Augmented Reality: A Systematic Literature Review (2009-2019)",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a438/1pysxe8SeyY",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09858334",
"articleId": "1FUYBSBjTRm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09866819",
"articleId": "1G7UilgWye4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1G7UilgWye4",
"doi": "10.1109/TVCG.2022.3201567",
"abstract": "Graph sampling frequently compresses a large graph into a limited screen space. This paper proposes a hierarchical structure model that partitions scale-free graphs into three blocks: the core, which captures the underlying community structure, the vertical graph, which represents minority structures that are important in visual analysis, and the periphery, which describes the connection structure between low-degree nodes. A new algorithm named hierarchical structure sampling (HSS) was then designed to preserve the characteristics of the three blocks, including complete replication of the connection relationship between high-degree nodes in the core, joint node/degree distribution between high- and low-degree nodes in the vertical graph, and proportional replication of the connection relationship between low-degree nodes in the periphery. Finally, the importance of some global statistical properties in visualization was analyzed. Both the global statistical properties and local visual features were used to evaluate the proposed algorithm, which verify that the algorithm can be applied to sample scale-free graphs with hundreds to one million nodes from a visualization perspective.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Graph sampling frequently compresses a large graph into a limited screen space. This paper proposes a hierarchical structure model that partitions scale-free graphs into three blocks: the core, which captures the underlying community structure, the vertical graph, which represents minority structures that are important in visual analysis, and the periphery, which describes the connection structure between low-degree nodes. A new algorithm named hierarchical structure sampling (HSS) was then designed to preserve the characteristics of the three blocks, including complete replication of the connection relationship between high-degree nodes in the core, joint node/degree distribution between high- and low-degree nodes in the vertical graph, and proportional replication of the connection relationship between low-degree nodes in the periphery. Finally, the importance of some global statistical properties in visualization was analyzed. Both the global statistical properties and local visual features were used to evaluate the proposed algorithm, which verify that the algorithm can be applied to sample scale-free graphs with hundreds to one million nodes from a visualization perspective.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Graph sampling frequently compresses a large graph into a limited screen space. This paper proposes a hierarchical structure model that partitions scale-free graphs into three blocks: the core, which captures the underlying community structure, the vertical graph, which represents minority structures that are important in visual analysis, and the periphery, which describes the connection structure between low-degree nodes. A new algorithm named hierarchical structure sampling (HSS) was then designed to preserve the characteristics of the three blocks, including complete replication of the connection relationship between high-degree nodes in the core, joint node/degree distribution between high- and low-degree nodes in the vertical graph, and proportional replication of the connection relationship between low-degree nodes in the periphery. Finally, the importance of some global statistical properties in visualization was analyzed. Both the global statistical properties and local visual features were used to evaluate the proposed algorithm, which verify that the algorithm can be applied to sample scale-free graphs with hundreds to one million nodes from a visualization perspective.",
"title": "Hierarchical Sampling for the Visualization of Large Scale-Free Graphs",
"normalizedTitle": "Hierarchical Sampling for the Visualization of Large Scale-Free Graphs",
"fno": "09866819",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Partitioning Algorithms",
"Scalability",
"Generators",
"Shape Measurement",
"Clustering Algorithms",
"Analytical Models",
"Graph Sampling",
"Large Scale Free Graph",
"Graph Visualization"
],
"authors": [
{
"givenName": "Bo",
"surname": "Jiao",
"fullName": "Bo Jiao",
"affiliation": "School of Information Science and Technology, Xiamen University Tan Kah Kee College, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Lu",
"fullName": "Xin Lu",
"affiliation": "School of Mathematics and BigData, Foshan University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jingbo",
"surname": "Xia",
"fullName": "Jingbo Xia",
"affiliation": "School of Information Science and Technology, Xiamen University Tan Kah Kee College, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brij Bhooshan",
"surname": "Gupta",
"fullName": "Brij Bhooshan Gupta",
"affiliation": "Department of Computer Science and Information Engineering, Asia University, Taichung, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Bao",
"fullName": "Lei Bao",
"affiliation": "School of Information Science and Technology, Xiamen University Tan Kah Kee College, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingshan",
"surname": "Zhou",
"fullName": "Qingshan Zhou",
"affiliation": "School of Mathematics and BigData, Foshan University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdcs/2018/6871/0/687101a567",
"title": "Generating Synthetic Social Graphs with Darwini",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2018/687101a567/12OmNqHItJp",
"parentPublication": {
"id": "proceedings/icdcs/2018/6871/0",
"title": "2018 IEEE 38th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2011/0771/0/06114462",
"title": "A scalable eigensolver for large scale-free graphs using 2D graph partitioning",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2011/06114462/12OmNrkBwGL",
"parentPublication": {
"id": "proceedings/sc/2011/0771/0",
"title": "2011 International Conference for High Performance Computing, Networking, Storage and Analysis (SC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2014/5666/0/07004219",
"title": "Parallel Breadth First Search on GPU clusters",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2014/07004219/12OmNwbLVtE",
"parentPublication": {
"id": "proceedings/big-data/2014/5666/0",
"title": "2014 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2015/8493/0/8493a493",
"title": "StructMatrix: Large-Scale Visualization of Graphs by Means of Structure Detection and Dense Matrices",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493a493/12OmNz4Bdni",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539318",
"title": "Evaluation of Graph Sampling: A Visualization Perspective",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539318/13rRUxZzAhI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2018/4230/0/423000a089",
"title": "A Faster Isomorphism Test for Graphs of Small Degree",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2018/423000a089/17D45WnnFYq",
"parentPublication": {
"id": "proceedings/focs/2018/4230/0",
"title": "2018 IEEE 59th Annual Symposium on Foundations of Computer Science (FOCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2018/7308/0/08644918",
"title": "Concurrent Hybrid Breadth-First-Search on Distributed PowerGraph for Skewed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2018/08644918/17QjJccHUPu",
"parentPublication": {
"id": "proceedings/icpads/2018/7308/0",
"title": "2018 IEEE 24th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2022/8106/0/810600a269",
"title": "Parallel Global Edge Switching for the Uniform Sampling of Simple Graphs with Prescribed Degrees",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2022/810600a269/1F1VYkEiOpW",
"parentPublication": {
"id": "proceedings/ipdps/2022/8106/0",
"title": "2022 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2022/0883/0/088300a472",
"title": "SLUGGER: Lossless Hierarchical Summarization of Massive Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2022/088300a472/1FwFvwzkqbu",
"parentPublication": {
"id": "proceedings/icde/2022/0883/0",
"title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413202",
"title": "Sketch-based Community Detection via Representative Node Sampling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413202/1tmhwwnXwL6",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09866555",
"articleId": "1G6ALnVS50I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09869633",
"articleId": "1GeVHT5oVtC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FUYBSBjTRm",
"doi": "10.1109/TVCG.2022.3199412",
"abstract": "In virtual reality, talking face generation is committed to using voice and face images to generate real face speech videos to improve the communication experience in the case of limited user information exchange. In a real video, blinking is an action often accompanied by speech, and it is also one of the indispensable actions in real face speech videos. However, the current methods either do not pay attention to the generation of eye movements, or cannot control the blinking in the generated results. To this end, this paper proposes a novel system which produces vivid talking face with controllable eye blinks driven by the joint features including identity feature, audio feature, and blink feature. In order to disentangle the blinking action, we designed three independent features to individually drive the main components in the generated frame, namely the facial appearance, mouth movements, and eye movements. Through the adversarial training of the identity encoder, we filter out the information of the eye state from the identity feature, thereby strengthening the independence of the blinking feature. We introduced the blink score as the leading information of the blink feature, and through training, the value can be consistent with human perception to form a complete and independent control of the eyes. Experimental results on multiple datasets show that our method can not only reproduce real talking faces, but also ensure that the blinking pattern and time are fully controllable.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In virtual reality, talking face generation is committed to using voice and face images to generate real face speech videos to improve the communication experience in the case of limited user information exchange. In a real video, blinking is an action often accompanied by speech, and it is also one of the indispensable actions in real face speech videos. However, the current methods either do not pay attention to the generation of eye movements, or cannot control the blinking in the generated results. To this end, this paper proposes a novel system which produces vivid talking face with controllable eye blinks driven by the joint features including identity feature, audio feature, and blink feature. In order to disentangle the blinking action, we designed three independent features to individually drive the main components in the generated frame, namely the facial appearance, mouth movements, and eye movements. Through the adversarial training of the identity encoder, we filter out the information of the eye state from the identity feature, thereby strengthening the independence of the blinking feature. We introduced the blink score as the leading information of the blink feature, and through training, the value can be consistent with human perception to form a complete and independent control of the eyes. Experimental results on multiple datasets show that our method can not only reproduce real talking faces, but also ensure that the blinking pattern and time are fully controllable.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In virtual reality, talking face generation is committed to using voice and face images to generate real face speech videos to improve the communication experience in the case of limited user information exchange. In a real video, blinking is an action often accompanied by speech, and it is also one of the indispensable actions in real face speech videos. However, the current methods either do not pay attention to the generation of eye movements, or cannot control the blinking in the generated results. To this end, this paper proposes a novel system which produces vivid talking face with controllable eye blinks driven by the joint features including identity feature, audio feature, and blink feature. In order to disentangle the blinking action, we designed three independent features to individually drive the main components in the generated frame, namely the facial appearance, mouth movements, and eye movements. Through the adversarial training of the identity encoder, we filter out the information of the eye state from the identity feature, thereby strengthening the independence of the blinking feature. We introduced the blink score as the leading information of the blink feature, and through training, the value can be consistent with human perception to form a complete and independent control of the eyes. Experimental results on multiple datasets show that our method can not only reproduce real talking faces, but also ensure that the blinking pattern and time are fully controllable.",
"title": "Generating Talking Face with Controllable Eye Movements by Disentangled Blinking Feature",
"normalizedTitle": "Generating Talking Face with Controllable Eye Movements by Disentangled Blinking Feature",
"fno": "09858334",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Faces",
"Videos",
"Mouth",
"Training",
"Three Dimensional Displays",
"Feature Extraction",
"Decoding",
"Talking Face Generation",
"Eye Blink Generation",
"Blink Feature",
"Virtual Character"
],
"authors": [
{
"givenName": "Shiguang",
"surname": "Liu",
"fullName": "Shiguang Liu",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiaqi",
"surname": "Hao",
"fullName": "Jiaqi Hao",
"affiliation": "College of Intelligence and Computing, Tianjin University, Tianjin, P.R. China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pg/1998/8620/0/86200140",
"title": "Image Talk: A Real Time Synthetic Talking Head Using One Single Image with Chinese Text-To-Speech Capability",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1998/86200140/12OmNyNzhxz",
"parentPublication": {
"id": "proceedings/pg/1998/8620/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011835",
"title": "Realistic facial expression synthesis for an image-based talking head",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011835/12OmNzcxYWX",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200d847",
"title": "FACIAL: Synthesizing Dynamic Talking Face with Implicit Attribute Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200d847/1BmFXTjNhrG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600v1393",
"title": "Talking Face Generation with Multilingual TTS",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600v1393/1H1lAyVFxF6",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2022/7177/0/717700b040",
"title": "BlinkRadar: Non-Intrusive Driver Eye-Blink Detection with UWB Radar",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2022/717700b040/1HriZc3SaeQ",
"parentPublication": {
"id": "proceedings/icdcs/2022/7177/0",
"title": "2022 IEEE 42nd International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10061572",
"title": "Free-HeadGAN: Neural Talking Head Synthesis with Explicit Gaze Control",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10061572/1Lk2C6ZD2zC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412425",
"title": "Talking Face Generation via Learning Semantic and Temporal Synchronous Landmarks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412425/1tmiW2iEQW4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09557828",
"title": "3D Talking Face With Personalized Pose Dynamics",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09557828/1xquHkBQ2xa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/4.509E179",
"title": "Pose-Controllable Talking Face Generation by Implicitly Modularized Audio-Visual Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/4.509E179/1yeLFZU81zy",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2021/0019/0/09597440",
"title": "Analysis of Eye Fixations During Emotion Recognition in Talking Faces",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2021/09597440/1yylelPtnB6",
"parentPublication": {
"id": "proceedings/acii/2021/0019/0",
"title": "2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09854202",
"articleId": "1FJ0TF9D3Jm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09866555",
"articleId": "1G6ALnVS50I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FWhXe4NI4w",
"name": "ttg555501-09858334s1-supp1-3199412.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09858334s1-supp1-3199412.mp4",
"extension": "mp4",
"size": "52.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FJ0TF9D3Jm",
"doi": "10.1109/TVCG.2022.3197810",
"abstract": "This paper presents the results from a Spatial Augmented Reality (SAR) study which evaluated the cognitive cost of several predictive cues. Participants performed a validated procedural button pressing task, where the predictive cue annotations guided them to the upcoming task. While existing research has evaluated predictive cues based on their performance and self-rated mental effort, actual cognitive cost has yet to be investigated. To measure the user's brain activity, this study utilized electroencephalogram (EEG) recordings. Cognitive load was evaluated by measuring brain responses for a secondary auditory oddball task, with reduced brain responses to oddball tones expected when cognitive load in the primary task is highest. A simple monitor n-back task and procedural task comparing monitor vs SAR were conducted, followed by a version of the procedural task comparing the SAR predictive cues. Results from the brain responses were able to distinguish between performance enhancing cues with a high and low cognitive load. Electrical brain responses also revealed that having an arc or arrow guide towards the upcoming task required the least amount of mental effort.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the results from a Spatial Augmented Reality (SAR) study which evaluated the cognitive cost of several predictive cues. Participants performed a validated procedural button pressing task, where the predictive cue annotations guided them to the upcoming task. While existing research has evaluated predictive cues based on their performance and self-rated mental effort, actual cognitive cost has yet to be investigated. To measure the user's brain activity, this study utilized electroencephalogram (EEG) recordings. Cognitive load was evaluated by measuring brain responses for a secondary auditory oddball task, with reduced brain responses to oddball tones expected when cognitive load in the primary task is highest. A simple monitor n-back task and procedural task comparing monitor vs SAR were conducted, followed by a version of the procedural task comparing the SAR predictive cues. Results from the brain responses were able to distinguish between performance enhancing cues with a high and low cognitive load. Electrical brain responses also revealed that having an arc or arrow guide towards the upcoming task required the least amount of mental effort.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the results from a Spatial Augmented Reality (SAR) study which evaluated the cognitive cost of several predictive cues. Participants performed a validated procedural button pressing task, where the predictive cue annotations guided them to the upcoming task. While existing research has evaluated predictive cues based on their performance and self-rated mental effort, actual cognitive cost has yet to be investigated. To measure the user's brain activity, this study utilized electroencephalogram (EEG) recordings. Cognitive load was evaluated by measuring brain responses for a secondary auditory oddball task, with reduced brain responses to oddball tones expected when cognitive load in the primary task is highest. A simple monitor n-back task and procedural task comparing monitor vs SAR were conducted, followed by a version of the procedural task comparing the SAR predictive cues. Results from the brain responses were able to distinguish between performance enhancing cues with a high and low cognitive load. Electrical brain responses also revealed that having an arc or arrow guide towards the upcoming task required the least amount of mental effort.",
"title": "Event Related Brain Responses Reveal the Impact of Spatial Augmented Reality Predictive Cues on Mental Effort",
"normalizedTitle": "Event Related Brain Responses Reveal the Impact of Spatial Augmented Reality Predictive Cues on Mental Effort",
"fno": "09854202",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Visualization",
"Electroencephalography",
"Monitoring",
"Pressing",
"Costs",
"Resists",
"Spatial Augmented Reality",
"Predictive Cues",
"EEG",
"Mental Effort",
"Cognitive Load"
],
"authors": [
{
"givenName": "Benjamin",
"surname": "Volmer",
"fullName": "Benjamin Volmer",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Baumeister",
"fullName": "James Baumeister",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stewart",
"surname": "Von Itzstein",
"fullName": "Stewart Von Itzstein",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthias",
"surname": "Schlesewsky",
"fullName": "Matthias Schlesewsky",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ina",
"surname": "Bornkessel-Schlesewsky",
"fullName": "Ina Bornkessel-Schlesewsky",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bruce H.",
"surname": "Thomas",
"fullName": "Bruce H. Thomas",
"affiliation": "Australian Research Centre for Interactive and Virtual EnvironmentsUniversity of South Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2017/0563/0/08273588",
"title": "Discovering gender differences in facial emotion recognition via implicit behavioral cues",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273588/12OmNBLdKQl",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2014/6572/0/6572a652",
"title": "A Minimal Spanning Tree Analysis of EEG Responses to Complex Visual Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2014/6572a652/12OmNBSjJ3j",
"parentPublication": {
"id": "proceedings/ictai/2014/6572/0",
"title": "2014 IEEE 26th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a381",
"title": "Understanding Effects of Cognitive Load from Pupillary Responses Using Hilbert Analytic Phase",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a381/12OmNC8uRjd",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2013/5261/0/06685016",
"title": "EEG-based comparisons of performance on a mental rotation task between learning styles and gender",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2013/06685016/12OmNqI04C1",
"parentPublication": {
"id": "proceedings/fie/2013/5261/0",
"title": "2013 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344549",
"title": "Physiological correlates of mental effort as manipulated through lane width during simulated driving",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344549/12OmNxiKs2K",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08493594",
"title": "A Comparison of Predictive Spatial Augmented Reality Cues for Procedural Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08493594/14M3DYV3qyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a906",
"title": "A Hierarchical Classification Strategy for Robust Detection of Passive/Active Mental State Using User-Voluntary Pitch Imagery Task",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a906/17D45Vu1TyA",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a318",
"title": "Poster: Imitation Learning for Hearing Loss Detection with Cortical Speech-Evoked Responses",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a318/1JC1l3xVtAI",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798029",
"title": "Studying the Mental Effort in Virtual Versus Real Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798029/1cJ0I9M7tVm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798266",
"title": "Towards EEG-Based Haptic Interaction within Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798266/1cJ13SHk4dW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09852717",
"articleId": "1FHlThR8hLG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09858334",
"articleId": "1FUYBSBjTRm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FKFBGSnh6w",
"name": "ttg555501-09854202s1-supp1-3197810.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09854202s1-supp1-3197810.pdf",
"extension": "pdf",
"size": "1.18 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FHlT4i4Pmw",
"doi": "10.1109/TVCG.2022.3197560",
"abstract": "Creating 3D shapes from 2D drawings is an important problem with applications in content creation for computer animation and virtual reality. We introduce a new sketch-based system, <italic>CreatureShop</italic>, that enables amateurs to create high-quality textured 3D character models from 2D drawings with ease and efficiency. CreatureShop takes an input bitmap drawing of a character (such as an animal or other creature), depicted from an arbitrary descriptive pose and viewpoint, and creates a 3D shape with plausible geometric details and textures from a small number of user annotations on the 2D drawing. Our key contributions are a novel oblique view modeling method, a set of systematic approaches for producing plausible textures on the invisible or occluded parts of the 3D character (as viewed from the direction of the input drawing), and a user-friendly interactive system. We validate our system and methods by creating numerous 3D characters from various drawings, and compare our results with related works to show the advantages of our method. We perform a user study to evaluate the usability of our system, which demonstrates that our system is a practical and efficient approach to create fully-textured 3D character models for novice users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Creating 3D shapes from 2D drawings is an important problem with applications in content creation for computer animation and virtual reality. We introduce a new sketch-based system, <italic>CreatureShop</italic>, that enables amateurs to create high-quality textured 3D character models from 2D drawings with ease and efficiency. CreatureShop takes an input bitmap drawing of a character (such as an animal or other creature), depicted from an arbitrary descriptive pose and viewpoint, and creates a 3D shape with plausible geometric details and textures from a small number of user annotations on the 2D drawing. Our key contributions are a novel oblique view modeling method, a set of systematic approaches for producing plausible textures on the invisible or occluded parts of the 3D character (as viewed from the direction of the input drawing), and a user-friendly interactive system. We validate our system and methods by creating numerous 3D characters from various drawings, and compare our results with related works to show the advantages of our method. We perform a user study to evaluate the usability of our system, which demonstrates that our system is a practical and efficient approach to create fully-textured 3D character models for novice users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Creating 3D shapes from 2D drawings is an important problem with applications in content creation for computer animation and virtual reality. We introduce a new sketch-based system, CreatureShop, that enables amateurs to create high-quality textured 3D character models from 2D drawings with ease and efficiency. CreatureShop takes an input bitmap drawing of a character (such as an animal or other creature), depicted from an arbitrary descriptive pose and viewpoint, and creates a 3D shape with plausible geometric details and textures from a small number of user annotations on the 2D drawing. Our key contributions are a novel oblique view modeling method, a set of systematic approaches for producing plausible textures on the invisible or occluded parts of the 3D character (as viewed from the direction of the input drawing), and a user-friendly interactive system. We validate our system and methods by creating numerous 3D characters from various drawings, and compare our results with related works to show the advantages of our method. We perform a user study to evaluate the usability of our system, which demonstrates that our system is a practical and efficient approach to create fully-textured 3D character models for novice users.",
"title": "CreatureShop: Interactive 3D Character Modeling and Texturing from a Single Color Drawing",
"normalizedTitle": "CreatureShop: Interactive 3D Character Modeling and Texturing from a Single Color Drawing",
"fno": "09852696",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Solid Modeling",
"Shape",
"Surface Texture",
"Geometry",
"Electronic Mail",
"Annotations",
"Character Modeling",
"Character Texturing",
"Interactive Techniques"
],
"authors": [
{
"givenName": "Congyi",
"surname": "Zhang",
"fullName": "Congyi Zhang",
"affiliation": "University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lei",
"surname": "Yang",
"fullName": "Lei Yang",
"affiliation": "University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nenglun",
"surname": "Chen",
"fullName": "Nenglun Chen",
"affiliation": "University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nicholas",
"surname": "Vining",
"fullName": "Nicholas Vining",
"affiliation": "NVIDIA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alla",
"surname": "Sheffer",
"fullName": "Alla Sheffer",
"affiliation": "University of British Columbia, Vancouver, BC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Francis C.M.",
"surname": "Lau",
"fullName": "Francis C.M. Lau",
"affiliation": "University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoping",
"surname": "Wang",
"fullName": "Guoping Wang",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenping",
"surname": "Wang",
"fullName": "Wenping Wang",
"affiliation": "Texas A&M University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2009/3965/0/04811229",
"title": "Poster: Spatially augmented tape drawing",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2009/04811229/12OmNASraBm",
"parentPublication": {
"id": "proceedings/3dui/2009/3965/0",
"title": "2009 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892323",
"title": "Texturing of augmented reality character based on colored drawing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892323/12OmNCcKQu9",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2014/3244/0/3244a267",
"title": "Color Descriptor for Content-Based Drawing Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/das/2014/3244a267/12OmNqFrGLO",
"parentPublication": {
"id": "proceedings/das/2014/3244/0",
"title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2001/1227/0/12270108",
"title": "Progressive 3D Reconstruction from a Sketch Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2001/12270108/12OmNvmXJ7d",
"parentPublication": {
"id": "proceedings/pg/2001/1227/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2005/2334/1/01541267",
"title": "3D object reconstruction from a single 2D line drawing without hidden lines",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2005/01541267/12OmNwwd2Xv",
"parentPublication": {
"id": "proceedings/iccv/2005/2334/2",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206841",
"title": "3D reconstruction of curved objects from single 2D line drawings",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206841/12OmNwwuDPa",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/02/06891368",
"title": "Progressive 3D Reconstruction of Planar-Faced Manifold Objects with DRF-Based Line Drawing Decomposition",
"doi": null,
"abstractUrl": "/journal/tg/2015/02/06891368/13rRUwInuWw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/12/i1579",
"title": "Identifying Faces in a 2D Line Drawing Representing a Manifold Object",
"doi": null,
"abstractUrl": "/journal/tp/2002/12/i1579/13rRUwhHcRL",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4184",
"title": "Neural Strokes: Stylized Line Drawing of 3D Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4184/1BmI4k03VrG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2022/6908/0/690800a037",
"title": "Impossible Objects of Your Choice: Designing Any 3D Objects from a 2D Line Drawing",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2022/690800a037/1FWmXYwyCpG",
"parentPublication": {
"id": "proceedings/nicoint/2022/6908/0",
"title": "2022 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09852330",
"articleId": "1FFHdt1RWHC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09852717",
"articleId": "1FHlThR8hLG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FKFBjaiIso",
"name": "ttg555501-09852696s1-supp1-3197560.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09852696s1-supp1-3197560.mp4",
"extension": "mp4",
"size": "66.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FHlThR8hLG",
"doi": "10.1109/TVCG.2022.3197383",
"abstract": "3D registration is a fundamental step to obtain the correspondences between surfaces. Traditional mesh alignment methods tackle this problem through non-rigid deformation, mostly accomplished by applying ICP-based (Iterative Closest Point) optimization. The embedded deformation method is proposed for the purpose of acceleration, which enables various real-time applications. However, it regularizes on an underlying simplified structure, which could be problematic for intricate cases when the simplified graph doesn't fully represent the surface attributes. Moreover, without elaborate parameter-tuning, deformation usually performs suboptimally, leading to slow convergence or a local minimum if all regions on the surface are assumed to share the same rigidity during the optimization. In this paper, we propose a novel solution that decouples regularization from the underlying deformation model by explicitly managing the rigidity of vertex clusters. We further design an efficient two-step solution that alternates between isometric deformation and embedded deformation with cluster-based regularization. Our method can easily support region-adaptive regularization with cluster refinement and execute efficiently. Extensive experiments demonstrate the effectiveness of our approach for mesh alignment tasks even under large-scale deformation and imperfect data. Our method outperforms state-of-the-art methods both numerically and visually.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D registration is a fundamental step to obtain the correspondences between surfaces. Traditional mesh alignment methods tackle this problem through non-rigid deformation, mostly accomplished by applying ICP-based (Iterative Closest Point) optimization. The embedded deformation method is proposed for the purpose of acceleration, which enables various real-time applications. However, it regularizes on an underlying simplified structure, which could be problematic for intricate cases when the simplified graph doesn't fully represent the surface attributes. Moreover, without elaborate parameter-tuning, deformation usually performs suboptimally, leading to slow convergence or a local minimum if all regions on the surface are assumed to share the same rigidity during the optimization. In this paper, we propose a novel solution that decouples regularization from the underlying deformation model by explicitly managing the rigidity of vertex clusters. We further design an efficient two-step solution that alternates between isometric deformation and embedded deformation with cluster-based regularization. Our method can easily support region-adaptive regularization with cluster refinement and execute efficiently. Extensive experiments demonstrate the effectiveness of our approach for mesh alignment tasks even under large-scale deformation and imperfect data. Our method outperforms state-of-the-art methods both numerically and visually.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D registration is a fundamental step to obtain the correspondences between surfaces. Traditional mesh alignment methods tackle this problem through non-rigid deformation, mostly accomplished by applying ICP-based (Iterative Closest Point) optimization. The embedded deformation method is proposed for the purpose of acceleration, which enables various real-time applications. However, it regularizes on an underlying simplified structure, which could be problematic for intricate cases when the simplified graph doesn't fully represent the surface attributes. Moreover, without elaborate parameter-tuning, deformation usually performs suboptimally, leading to slow convergence or a local minimum if all regions on the surface are assumed to share the same rigidity during the optimization. In this paper, we propose a novel solution that decouples regularization from the underlying deformation model by explicitly managing the rigidity of vertex clusters. We further design an efficient two-step solution that alternates between isometric deformation and embedded deformation with cluster-based regularization. Our method can easily support region-adaptive regularization with cluster refinement and execute efficiently. Extensive experiments demonstrate the effectiveness of our approach for mesh alignment tasks even under large-scale deformation and imperfect data. Our method outperforms state-of-the-art methods both numerically and visually.",
"title": "Efficient Registration for Human Surfaces Via Isometric Regularization on Embedded Deformation",
"normalizedTitle": "Efficient Registration for Human Surfaces Via Isometric Regularization on Embedded Deformation",
"fno": "09852717",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Strain",
"Optimization",
"Three Dimensional Displays",
"Deformable Models",
"Rigidity",
"Real Time Systems",
"Shape",
"3 D Registration",
"3 D Segmentation",
"Mesh Alignment",
"Non Rigid Deformation",
"Regularization"
],
"authors": [
{
"givenName": "Kunyao",
"surname": "Chen",
"fullName": "Kunyao Chen",
"affiliation": "ECE, UCSD, La Jolla, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fei",
"surname": "Yin",
"fullName": "Fei Yin",
"affiliation": "Electrical and Computer Engineering, University of California San Diego, La Jolla, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bang",
"surname": "Du",
"fullName": "Bang Du",
"affiliation": "Electrical and Computer Engineering, University of California San Diego, La Jolla, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baichuan",
"surname": "Wu",
"fullName": "Baichuan Wu",
"affiliation": "Jacobs School of Engineering, University of California San Diego, La Jolla, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Truong Q.",
"surname": "Nguyen",
"fullName": "Truong Q. Nguyen",
"affiliation": "Electrical and Computer Engineering, University of California San Diego, La Jolla, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601a876",
"title": "Vector Graph Representation for Deformation Transfer Using Poisson Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a876/12OmNAOKnQh",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10049724",
"title": "Fast and Robust Non-Rigid Registration Using Accelerated Majorization-Minimization",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10049724/1KYogPkTzOM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a073",
"title": "Surface Flattening Based on Energy Fabric Deformation Model in Garment Design",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a073/1ap5xx2ft5e",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a021",
"title": "Object-in-Hand Feature Displacement with Physically-Based Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a021/1cMF6VjqqT6",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a308",
"title": "Global as-Conformal-as-Possible Non-Rigid Registration of Multi-view Scans",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a308/1cdOR3XSAY8",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/03/08839414",
"title": "Sparse Data Driven Mesh Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2021/03/08839414/1dqsrINsJsk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a220",
"title": "Screen-space Regularization on Differentiable Rasterization",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a220/1qyxlQVwTKM",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09444875",
"title": "Variational Autoencoders for Localized Mesh Deformation Component Analysis",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09444875/1u51uvab1eM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09537699",
"title": "Multiscale Mesh Deformation Component Analysis With Attention-Based Autoencoders",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09537699/1wTiueApSAU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0923",
"title": "Learning-based Image Registration with Meta-Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0923/1yeHJs5J8E8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09852696",
"articleId": "1FHlT4i4Pmw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09854202",
"articleId": "1FJ0TF9D3Jm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FFHd5L2SxG",
"doi": "10.1109/TVCG.2022.3197203",
"abstract": "Although deep learning has demonstrated its capability in solving diverse scientific visualization problems, it still lacks generalization power across different tasks. To address this challenge, we propose CoordNet, a single coordinate-based framework that tackles various tasks relevant to time-varying volumetric data visualization without modifying the network architecture. The core idea of our approach is to decompose diverse task inputs and outputs into a unified representation (i.e., coordinates and values) and learn a function from coordinates to their corresponding values. We achieve this goal using a residual block-based implicit neural representation architecture with periodic activation functions. We evaluate CoordNet on data generation (i.e., temporal super-resolution and spatial super-resolution) and visualization generation (i.e., view synthesis and ambient occlusion prediction) tasks using time-varying volumetric data sets of various characteristics. The experimental results indicate that CoordNet achieves better quantitative and qualitative results than the state-of-the-art approaches across all the evaluated tasks. Source code and pre-trained models are available at <uri>https://github.com/stevenhan1991/CoordNet</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although deep learning has demonstrated its capability in solving diverse scientific visualization problems, it still lacks generalization power across different tasks. To address this challenge, we propose CoordNet, a single coordinate-based framework that tackles various tasks relevant to time-varying volumetric data visualization without modifying the network architecture. The core idea of our approach is to decompose diverse task inputs and outputs into a unified representation (i.e., coordinates and values) and learn a function from coordinates to their corresponding values. We achieve this goal using a residual block-based implicit neural representation architecture with periodic activation functions. We evaluate CoordNet on data generation (i.e., temporal super-resolution and spatial super-resolution) and visualization generation (i.e., view synthesis and ambient occlusion prediction) tasks using time-varying volumetric data sets of various characteristics. The experimental results indicate that CoordNet achieves better quantitative and qualitative results than the state-of-the-art approaches across all the evaluated tasks. Source code and pre-trained models are available at <uri>https://github.com/stevenhan1991/CoordNet</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although deep learning has demonstrated its capability in solving diverse scientific visualization problems, it still lacks generalization power across different tasks. To address this challenge, we propose CoordNet, a single coordinate-based framework that tackles various tasks relevant to time-varying volumetric data visualization without modifying the network architecture. The core idea of our approach is to decompose diverse task inputs and outputs into a unified representation (i.e., coordinates and values) and learn a function from coordinates to their corresponding values. We achieve this goal using a residual block-based implicit neural representation architecture with periodic activation functions. We evaluate CoordNet on data generation (i.e., temporal super-resolution and spatial super-resolution) and visualization generation (i.e., view synthesis and ambient occlusion prediction) tasks using time-varying volumetric data sets of various characteristics. The experimental results indicate that CoordNet achieves better quantitative and qualitative results than the state-of-the-art approaches across all the evaluated tasks. Source code and pre-trained models are available at https://github.com/stevenhan1991/CoordNet.",
"title": "CoordNet: Data Generation and Visualization Generation for Time-Varying Volumes via a Coordinate-Based Neural Network",
"normalizedTitle": "CoordNet: Data Generation and Visualization Generation for Time-Varying Volumes via a Coordinate-Based Neural Network",
"fno": "09852325",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Data Visualization",
"Rendering Computer Graphics",
"Deep Learning",
"Superresolution",
"Neural Networks",
"Decoding",
"Volume Visualization",
"Implicit Neural Representation",
"Data Generation",
"Visualization Generation"
],
"authors": [
{
"givenName": "Jun",
"surname": "Han",
"fullName": "Jun Han",
"affiliation": "School of Data Science, The Chinese University of Hong Kong, Shenzhen, Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chaoli",
"surname": "Wang",
"fullName": "Chaoli Wang",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vg/2005/26/0/01500530",
"title": "Time-varying interval volumes",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500530/12OmNBigFuM",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1994/6627/0/00346340",
"title": "VolVis: a diversified volume visualization system",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1994/00346340/12OmNroijkk",
"parentPublication": {
"id": "proceedings/visual/1994/6627/0",
"title": "Proceedings Visualization '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2011/935/0/05742378",
"title": "Analyzing information transfer in time-varying multivariate data",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742378/12OmNvA1h6P",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2003/2091/0/20910013",
"title": "A PC Cluster System for Simultaneous Interactive Volumetric Modeling and Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2003/20910013/12OmNvAAtys",
"parentPublication": {
"id": "proceedings/pvg/2003/2091/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1992/2897/0/00235231",
"title": "Towards a comprehensive volume visualization system",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1992/00235231/12OmNwwd2JM",
"parentPublication": {
"id": "proceedings/visual/1992/2897/0",
"title": "Proceedings Visualization '92",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1993/3940/0/00398846",
"title": "Flow volumes for interactive vector field visualization",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1993/00398846/12OmNy314f9",
"parentPublication": {
"id": "proceedings/visual/1993/3940/0",
"title": "Proceedings Visualization '93",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2003/2091/0/20910010",
"title": "Real-Time Volume Rendering of Time-Varying Data Using a Fragment-Shader Compression Approach",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2003/20910010/12OmNyaGeMm",
"parentPublication": {
"id": "proceedings/pvg/2003/2091/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475454",
"title": "Dynamic Shader Generation for Flexible Multi-Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475454/12OmNyfdOUy",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06851204",
"title": "Study of a Ray Casting Technique for the Visualization of Deformable Volumes",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06851204/13rRUEgarBv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a282",
"title": "DNN-VolVis: Interactive Volume Visualization Supported by Deep Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a282/1cMF6YRpyH6",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09850416",
"articleId": "1Fz4SPLVTMY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09852330",
"articleId": "1FFHdt1RWHC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1G6AL2R0S9G",
"name": "ttg555501-09852325s1-supp2-3197203.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09852325s1-supp2-3197203.pdf",
"extension": "pdf",
"size": "109 MB",
"__typename": "WebExtraType"
},
{
"id": "1FHlTUmTZWo",
"name": "ttg555501-09852325s1-supp1-3197203.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09852325s1-supp1-3197203.wmv",
"extension": "wmv",
"size": "112 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FFHdt1RWHC",
"doi": "10.1109/TVCG.2022.3197354",
"abstract": "In this paper, we present an end-to-end neural solution to model portrait bas-relief from a single photograph, which is cast as a problem of image-to-depth translation. The main challenge is the lack of bas-relief data for network training. To solve this problem, we propose a semi-automatic pipeline to synthesize bas-relief samples. The main idea is to first construct normal maps from photos, and then generate bas-relief samples by reconstructing pixel-wise depths. In total, our synthetic dataset contains 23k pixel-wise photo/bas-relief pairs. Since the process of bas-relief synthesis requires a certain amount of user interactions, we propose end-to-end solutions with various network architectures, and train them on the synthetic data. We select the one that gave the best results through qualitative and quantitative comparisons. Experiments on numerous portrait photos, comparisons with state-of-the-art methods and evaluations by artists have proven the effectiveness and efficiency of the selected network.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present an end-to-end neural solution to model portrait bas-relief from a single photograph, which is cast as a problem of image-to-depth translation. The main challenge is the lack of bas-relief data for network training. To solve this problem, we propose a semi-automatic pipeline to synthesize bas-relief samples. The main idea is to first construct normal maps from photos, and then generate bas-relief samples by reconstructing pixel-wise depths. In total, our synthetic dataset contains 23k pixel-wise photo/bas-relief pairs. Since the process of bas-relief synthesis requires a certain amount of user interactions, we propose end-to-end solutions with various network architectures, and train them on the synthetic data. We select the one that gave the best results through qualitative and quantitative comparisons. Experiments on numerous portrait photos, comparisons with state-of-the-art methods and evaluations by artists have proven the effectiveness and efficiency of the selected network.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present an end-to-end neural solution to model portrait bas-relief from a single photograph, which is cast as a problem of image-to-depth translation. The main challenge is the lack of bas-relief data for network training. To solve this problem, we propose a semi-automatic pipeline to synthesize bas-relief samples. The main idea is to first construct normal maps from photos, and then generate bas-relief samples by reconstructing pixel-wise depths. In total, our synthetic dataset contains 23k pixel-wise photo/bas-relief pairs. Since the process of bas-relief synthesis requires a certain amount of user interactions, we propose end-to-end solutions with various network architectures, and train them on the synthetic data. We select the one that gave the best results through qualitative and quantitative comparisons. Experiments on numerous portrait photos, comparisons with state-of-the-art methods and evaluations by artists have proven the effectiveness and efficiency of the selected network.",
"title": "Neural Modeling of Portrait Bas-relief from a Single Photograph",
"normalizedTitle": "Neural Modeling of Portrait Bas-relief from a Single Photograph",
"fno": "09852330",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Hair",
"Image Reconstruction",
"Shape",
"Pipelines",
"Faces",
"Solid Modeling",
"Bas Relief Modeling",
"Portrait Bas Relief",
"Image To Depth Translation",
"Depth Reconstruction"
],
"authors": [
{
"givenName": "Yu-Wei",
"surname": "Zhang",
"fullName": "Yu-Wei Zhang",
"affiliation": "School of Mechanical and Automotive Engineering, Qilu University of Technology (Shandong Academy of Sciences), Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ping",
"surname": "Luo",
"fullName": "Ping Luo",
"affiliation": "School of Mechanical and Automotive Engineering, Qilu University of Technology (Shandong Academy of Sciences), Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hao",
"surname": "Zhou",
"fullName": "Hao Zhou",
"affiliation": "School of Mechanical and Automotive Engineering, Qilu University of Technology (Shandong Academy of Sciences), Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhongping",
"surname": "Ji",
"fullName": "Zhongping Ji",
"affiliation": "Institute of Graphics and Image, Hangzhou Dianzi University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Liu",
"fullName": "Hui Liu",
"affiliation": "School of Computer Science and Technology, Shandong University of Finance and Economics, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yanzhao",
"surname": "Chen",
"fullName": "Yanzhao Chen",
"affiliation": "School of Mechanical and Automotive Engineering, Qilu University of Technology (Shandong Academy of Sciences), Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caiming",
"surname": "Zhang",
"fullName": "Caiming Zhang",
"affiliation": "School of Computer Science and Technology, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ciis/2017/3886/0/3886a294",
"title": "Watermarking Algorithm for Bas-Relief Based on Depth Grayscale Image",
"doi": null,
"abstractUrl": "/proceedings-article/ciis/2017/3886a294/12OmNAPjA5w",
"parentPublication": {
"id": "proceedings/ciis/2017/3886/0",
"title": "2017 International Conference on Computing Intelligence and Information System (CIIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1997/7822/0/78221060",
"title": "The Bas-Relief Ambiguity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78221060/12OmNyRPgwe",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a411",
"title": "Image-Based Hair Pre-processing for Art Creation: A Case Study of Bas-Relief Modelling",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a411/12OmNzBwGJv",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06975236",
"title": "Bas-Relief Generation and Shape Editing through Gradient-Based Mesh Deformation",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06975236/13rRUwI5TXA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/05/06684153",
"title": "Bas-Relief Modeling from Normal Images with Intuitive Styles",
"doi": null,
"abstractUrl": "/journal/tg/2014/05/06684153/13rRUx0xPia",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/04/ttg2009040642",
"title": "Bas-Relief Generation Using Adaptive Histogram Equalization",
"doi": null,
"abstractUrl": "/journal/tg/2009/04/ttg2009040642/13rRUyogGA7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/08611145",
"title": "Portrait Relief Modeling from a Single Image",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/08611145/17D45XDIXSX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/04/08322258",
"title": "Bas-Relief Modeling from Normal Layers",
"doi": null,
"abstractUrl": "/journal/tg/2019/04/08322258/17YCN5E6cAE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2020/9228/0/922800a510",
"title": "Sketch2Relief: Generating Bas-relief from Sketches with Deep Generative Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2020/922800a510/1pP3DzePTB6",
"parentPublication": {
"id": "proceedings/ictai/2020/9228/0",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09468903",
"title": "Human Bas-Relief Generation From a Single Photograph",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09468903/1uR9KNPeety",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09852325",
"articleId": "1FFHd5L2SxG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09852696",
"articleId": "1FHlT4i4Pmw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FHlUzmNTK8",
"name": "ttg555501-09852330s1-supp1-3197354.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09852330s1-supp1-3197354.mp4",
"extension": "mp4",
"size": "2.37 MB",
"__typename": "WebExtraType"
},
{
"id": "1FHlUuOn1jW",
"name": "ttg555501-09852330s1-supp2-3197354.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09852330s1-supp2-3197354.mp4",
"extension": "mp4",
"size": "6.24 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fz4SEQnoiY",
"doi": "10.1109/TVCG.2022.3196334",
"abstract": "Point cloud upsampling aims to generate dense point clouds from given sparse ones, which is a challenging task due to the irregular and unordered nature of point sets. To address this issue, we present a novel deep learning-based model, called PU-Flow, which incorporates normalizing flows and weight prediction techniques to produce dense points uniformly distributed on the underlying surface. Specifically, we exploit the invertible characteristics of normalizing flows to transform points between Euclidean and latent spaces and formulate the upsampling process as ensemble of neighbouring points in a latent space, where the ensemble weights are adaptively learned from local geometric context. Extensive experiments show that our method is competitive and, in most test cases, it outperforms state-of-the-art methods in terms of reconstruction quality, proximity-to-surface accuracy, and computation efficiency. The source code will be publicly available at <uri>https://github.com/unknownue/puflow</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point cloud upsampling aims to generate dense point clouds from given sparse ones, which is a challenging task due to the irregular and unordered nature of point sets. To address this issue, we present a novel deep learning-based model, called PU-Flow, which incorporates normalizing flows and weight prediction techniques to produce dense points uniformly distributed on the underlying surface. Specifically, we exploit the invertible characteristics of normalizing flows to transform points between Euclidean and latent spaces and formulate the upsampling process as ensemble of neighbouring points in a latent space, where the ensemble weights are adaptively learned from local geometric context. Extensive experiments show that our method is competitive and, in most test cases, it outperforms state-of-the-art methods in terms of reconstruction quality, proximity-to-surface accuracy, and computation efficiency. The source code will be publicly available at <uri>https://github.com/unknownue/puflow</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point cloud upsampling aims to generate dense point clouds from given sparse ones, which is a challenging task due to the irregular and unordered nature of point sets. To address this issue, we present a novel deep learning-based model, called PU-Flow, which incorporates normalizing flows and weight prediction techniques to produce dense points uniformly distributed on the underlying surface. Specifically, we exploit the invertible characteristics of normalizing flows to transform points between Euclidean and latent spaces and formulate the upsampling process as ensemble of neighbouring points in a latent space, where the ensemble weights are adaptively learned from local geometric context. Extensive experiments show that our method is competitive and, in most test cases, it outperforms state-of-the-art methods in terms of reconstruction quality, proximity-to-surface accuracy, and computation efficiency. The source code will be publicly available at https://github.com/unknownue/puflow.",
"title": "PU-Flow: a Point Cloud Upsampling Network with Normalizing Flows",
"normalizedTitle": "PU-Flow: a Point Cloud Upsampling Network with Normalizing Flows",
"fno": "09850404",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Point Cloud Compression",
"Interpolation",
"Feature Extraction",
"Task Analysis",
"Pipelines",
"Three Dimensional Displays",
"Surface Treatment",
"Point Cloud Analysis",
"Upsampling",
"Normalizing Flows",
"Weight Prediction"
],
"authors": [
{
"givenName": "Aihua",
"surname": "Mao",
"fullName": "Aihua Mao",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zihui",
"surname": "Du",
"fullName": "Zihui Du",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junhui",
"surname": "Hou",
"fullName": "Junhui Hou",
"affiliation": "Department of Computer Science, City University of Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yaqi",
"surname": "Duan",
"fullName": "Yaqi Duan",
"affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong-jin",
"surname": "Liu",
"fullName": "Yong-jin Liu",
"affiliation": "BNRist, MOE-Key Laboratory of Pervasive Computing, Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "He",
"fullName": "Ying He",
"affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000c790",
"title": "PU-Net: Point Cloud Upsampling Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c790/17D45XDIXTB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200q6188",
"title": "PU-EVA: An Edge-Vector based Approximation Solution for Flexible-scale Point Cloud Upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200q6188/1BmF0g5VGX6",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859662",
"title": "“Zero-Shot” Point Cloud Upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859662/1G9EpZI4WbK",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8612",
"title": "Neural Points: Point Cloud Representation with Neural Fields for Arbitrary Upsampling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8612/1H0KFnuPS6c",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600b989",
"title": "Self-Supervised Arbitrary-Scale Point Clouds Upsampling via Implicit Neural Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600b989/1H1ih6WT91S",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10044160",
"title": "Flattening-Net: Deep Regular 2D Representation for 3D Point Cloud Analysis",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10044160/1KL6TgYfsLC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a465",
"title": "Arbitrary Point Cloud Upsampling with Spherical Mixture of Gaussians",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a465/1KYsuNBHuxO",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h202",
"title": "PU-GAN: A Point Cloud Upsampling Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h202/1hVlmofrMd2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1678",
"title": "PU-GCN: Point Cloud Upsampling using Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1678/1yeJFgHQHmM",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b249",
"title": "Go with the Flows: Mixtures of Normalizing Flows for Point Cloud Generation and Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b249/1zWEkPrnaSI",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09847102",
"articleId": "1Fu4IEH0oAU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09850416",
"articleId": "1Fz4SPLVTMY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FUYB5hWrQs",
"name": "ttg555501-09850404s1-tvcg-3196334-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09850404s1-tvcg-3196334-mm.zip",
"extension": "zip",
"size": "476 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fz4SPLVTMY",
"doi": "10.1109/TVCG.2022.3196606",
"abstract": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Distances are commonly underperceived in virtual reality (VR), and this finding has been documented repeatedly over more than two decades of research. Yet, there is evidence that perceived distance is more accurate in modern compared to older head-mounted displays (HMDs). This meta-analysis of 131 studies describes egocentric distance perception across 20 HMDs, and also examines the relationship between perceived distance and technical HMD characteristics. Judged distance was positively associated with HMD field of view (FOV), positively associated with HMD resolution, and negatively associated with HMD weight. The effects of FOV and resolution were more pronounced among heavier HMDs. These findings suggest that future improvements in these technical characteristics may be central to resolving the problem of distance underperception in VR.",
"title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"normalizedTitle": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"fno": "09850416",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Resists",
"Image Resolution",
"Head",
"Virtual Environments",
"Head Mounted Displays",
"Task Analysis",
"Surface Texture",
"Distance Perception",
"Egocentric Distance",
"Field Of View",
"Head Mounted Display",
"Meta Analysis",
"Resolution",
"Virtual Environment",
"Virtual Reality",
"Weight"
],
"authors": [
{
"givenName": "Jonathan W.",
"surname": "Kelly",
"fullName": "Jonathan W. Kelly",
"affiliation": "Department of Psychology, Iowa State Universty, Ames, IA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2009/3779/0/3779a047",
"title": "Clinical Implementation of a Head-Mounted Display of Patient Vital Signs",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2009/3779a047/12OmNzlUKPY",
"parentPublication": {
"id": "proceedings/iswc/2009/3779/0",
"title": "2009 International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a105",
"title": "Real-Time Recognition of In-Place Body Actions and Head Gestures using Only a Head-Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a105/1MNgCnmbXyM",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797736",
"title": "Emotion Recognition in Gamers Wearing Head-mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797736/1cJ0JubbA6A",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a542",
"title": "Field of View Effect on Distance Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a542/1tnXQ9aew80",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a109",
"title": "Generative RGB-D Face Completion for Head-Mounted Display Removal",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09850404",
"articleId": "1Fz4SEQnoiY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09852325",
"articleId": "1FFHd5L2SxG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fu4IsDCz72",
"doi": "10.1109/TVCG.2022.3192713",
"abstract": "In this paper, we propose a controllable high-quality free viewpoint video generation method based on the motion graph and neural radiance fields (NeRF). Different from existing pose-driven NeRF or time/structure conditioned NeRF works, we propose to first construct a directed motion graph of the captured sequence. Such a sequence-motion-parameterization strategy not only enables flexible pose control for free viewpoint video rendering but also avoids redundant calculation of similar poses and thus improves the overall reconstruction efficiency. Moreover, to support body shape control without losing the realistic free viewpoint rendering performance, we improve the vanilla NeRF by combining explicit surface deformation and implicit neural scene representations. Specifically, we train a local surface-guided NeRF for each valid frame on the motion graph, and the volumetric rendering was only performed in the local space around the real surface, thus enabling plausible shape control ability. As far as we know, our method is the first method that supports both realistic free viewpoint video reconstruction and motion graph-based user-guided motion traversal. The results and comparisons further demonstrate the effectiveness of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a controllable high-quality free viewpoint video generation method based on the motion graph and neural radiance fields (NeRF). Different from existing pose-driven NeRF or time/structure conditioned NeRF works, we propose to first construct a directed motion graph of the captured sequence. Such a sequence-motion-parameterization strategy not only enables flexible pose control for free viewpoint video rendering but also avoids redundant calculation of similar poses and thus improves the overall reconstruction efficiency. Moreover, to support body shape control without losing the realistic free viewpoint rendering performance, we improve the vanilla NeRF by combining explicit surface deformation and implicit neural scene representations. Specifically, we train a local surface-guided NeRF for each valid frame on the motion graph, and the volumetric rendering was only performed in the local space around the real surface, thus enabling plausible shape control ability. As far as we know, our method is the first method that supports both realistic free viewpoint video reconstruction and motion graph-based user-guided motion traversal. The results and comparisons further demonstrate the effectiveness of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a controllable high-quality free viewpoint video generation method based on the motion graph and neural radiance fields (NeRF). Different from existing pose-driven NeRF or time/structure conditioned NeRF works, we propose to first construct a directed motion graph of the captured sequence. Such a sequence-motion-parameterization strategy not only enables flexible pose control for free viewpoint video rendering but also avoids redundant calculation of similar poses and thus improves the overall reconstruction efficiency. Moreover, to support body shape control without losing the realistic free viewpoint rendering performance, we improve the vanilla NeRF by combining explicit surface deformation and implicit neural scene representations. Specifically, we train a local surface-guided NeRF for each valid frame on the motion graph, and the volumetric rendering was only performed in the local space around the real surface, thus enabling plausible shape control ability. As far as we know, our method is the first method that supports both realistic free viewpoint video reconstruction and motion graph-based user-guided motion traversal. The results and comparisons further demonstrate the effectiveness of the proposed method.",
"title": "Controllable Free Viewpoint Video Reconstruction Based on Neural Radiance Fields and Motion Graphs",
"normalizedTitle": "Controllable Free Viewpoint Video Reconstruction Based on Neural Radiance Fields and Motion Graphs",
"fno": "09845414",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Training",
"Surface Reconstruction",
"Shape",
"Image Reconstruction",
"Dynamics",
"Aerospace Electronics",
"Controllable Free Viewpoint Video",
"Motion Graph",
"Ne RF",
"Surface Guided Volumetric Rendering"
],
"authors": [
{
"givenName": "He",
"surname": "Zhang",
"fullName": "He Zhang",
"affiliation": "School of Instrumentation and Optoelectronic Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fan",
"surname": "Li",
"fullName": "Fan Li",
"affiliation": "School of Instrumentation and Optoelectronic Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianhui",
"surname": "Zhao",
"fullName": "Jianhui Zhao",
"affiliation": "School of Instrumentation and Optoelectronic Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chao",
"surname": "Tan",
"fullName": "Chao Tan",
"affiliation": "Weilan Tech Company, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongming",
"surname": "Shen",
"fullName": "Dongming Shen",
"affiliation": "University of Southern California, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yebin",
"surname": "Liu",
"fullName": "Yebin Liu",
"affiliation": "Department of Automation and BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tao",
"surname": "Yu",
"fullName": "Tao Yu",
"affiliation": "Department of Automation and BNRist, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200f569",
"title": "UNISURF: Unifying Neural Implicit Surfaces and Radiance Fields for Multi-View Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f569/1BmEEU96fmg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f732",
"title": "PlenOctrees for Real-time Rendering of Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f732/1BmEtIfeMZW",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2892",
"title": "EfficientNeRF - Efficient Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2892/1H0OvIHTU7S",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h733",
"title": "HumanNeRF: Efficiently Generated Human Radiance Field from Sparse Inputs",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h733/1H1jKf11fzy",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3514",
"title": "Fourier PlenOctrees for Dynamic Radiance Field Rendering in Real-time",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3514/1H1m9gTxNYc",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f428",
"title": "Point-NeRF: Point-based Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a646",
"title": "Controllable Radiance Fields for Dynamic Face Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a646/1KYspKZnHMs",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j416",
"title": "Space-time Neural Irradiance Fields for Free-Viewpoint Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j416/1yeHJmi5mQo",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0313",
"title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f700",
"title": "Learning Compositional Radiance Fields of Dynamic Human Heads",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f700/1yeLzlbmTLy",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09844860",
"articleId": "1Fp5UcDqu3K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09847102",
"articleId": "1Fu4IEH0oAU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FvJxlxcQ5G",
"name": "ttg555501-09845414s1-supp1-3192713.m4v",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09845414s1-supp1-3192713.m4v",
"extension": "m4v",
"size": "16.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fu4IEH0oAU",
"doi": "10.1109/TVCG.2022.3193672",
"abstract": "The <italic>MD-Cave</italic> is an immersive analytics system that provides enhanced stereoscopic visualizations to support visual diagnoses performed by radiologists. The system harnesses contemporary paradigms in immersive visualization and 3D interaction, which are better suited for investigating 3D volumetric data. We retain practicality through efficient utilization of desk space and comfort for radiologists in terms of frequent long duration use. <italic>MD-Cave</italic> is general and incorporates: (1) high resolution stereoscopic visualizations through a surround triple-monitor setup, (2) 3D interactions through head and hand tracking, (3) and a general framework that supports 3D visualization of deep-seated anatomical structures without the need for explicit segmentation algorithms. Such a general framework expands the utility of our system to many diagnostic scenarios. We have developed <italic>MD-Cave</italic> through close collaboration and feedback from two expert radiologists who evaluated the utility of <italic>MD-Cave</italic> and the 3D interactions in the context of radiological examinations. We also provide evaluation of <italic>MD-Cave</italic> through case studies performed by an expert radiologist and concrete examples on multiple real-world diagnostic scenarios, such as pancreatic cancer, shoulder-CT, and COVID-19 Chest CT examination.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The <italic>MD-Cave</italic> is an immersive analytics system that provides enhanced stereoscopic visualizations to support visual diagnoses performed by radiologists. The system harnesses contemporary paradigms in immersive visualization and 3D interaction, which are better suited for investigating 3D volumetric data. We retain practicality through efficient utilization of desk space and comfort for radiologists in terms of frequent long duration use. <italic>MD-Cave</italic> is general and incorporates: (1) high resolution stereoscopic visualizations through a surround triple-monitor setup, (2) 3D interactions through head and hand tracking, (3) and a general framework that supports 3D visualization of deep-seated anatomical structures without the need for explicit segmentation algorithms. Such a general framework expands the utility of our system to many diagnostic scenarios. We have developed <italic>MD-Cave</italic> through close collaboration and feedback from two expert radiologists who evaluated the utility of <italic>MD-Cave</italic> and the 3D interactions in the context of radiological examinations. We also provide evaluation of <italic>MD-Cave</italic> through case studies performed by an expert radiologist and concrete examples on multiple real-world diagnostic scenarios, such as pancreatic cancer, shoulder-CT, and COVID-19 Chest CT examination.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The MD-Cave is an immersive analytics system that provides enhanced stereoscopic visualizations to support visual diagnoses performed by radiologists. The system harnesses contemporary paradigms in immersive visualization and 3D interaction, which are better suited for investigating 3D volumetric data. We retain practicality through efficient utilization of desk space and comfort for radiologists in terms of frequent long duration use. MD-Cave is general and incorporates: (1) high resolution stereoscopic visualizations through a surround triple-monitor setup, (2) 3D interactions through head and hand tracking, (3) and a general framework that supports 3D visualization of deep-seated anatomical structures without the need for explicit segmentation algorithms. Such a general framework expands the utility of our system to many diagnostic scenarios. We have developed MD-Cave through close collaboration and feedback from two expert radiologists who evaluated the utility of MD-Cave and the 3D interactions in the context of radiological examinations. We also provide evaluation of MD-Cave through case studies performed by an expert radiologist and concrete examples on multiple real-world diagnostic scenarios, such as pancreatic cancer, shoulder-CT, and COVID-19 Chest CT examination.",
"title": "<italic>MD-Cave</italic>: An Immersive Visualization Workbench for Radiologists",
"normalizedTitle": "MD-Cave: An Immersive Visualization Workbench for Radiologists",
"fno": "09847102",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Visualization",
"Stereo Image Processing",
"Data Visualization",
"Task Analysis",
"Head",
"Radiology",
"3 D Selection",
"Immersive Diagnosis",
"Stereoscopic Visualization",
"Volume Rendering"
],
"authors": [
{
"givenName": "Shreeraj",
"surname": "Jadhav",
"fullName": "Shreeraj Jadhav",
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arie E.",
"surname": "Kaufman",
"fullName": "Arie E. Kaufman",
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/visapp/2014/8133/3/07295085",
"title": "Immersive visualizations in a VR Cave environment for the training and enhancement of social skills for children with autism",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295085/12OmNBQkx5E",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2016/4149/0/4149a178",
"title": "A CAVE/Desktop Collaborative Virtual Environment for Offshore Oil Platform Training",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2016/4149a178/12OmNrJiCGH",
"parentPublication": {
"id": "proceedings/svr/2016/4149/0",
"title": "2016 XVIII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759646",
"title": "Image-based stereo background modeling for CAVE system",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759646/12OmNxG1yLl",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539620",
"title": "Immersive Collaborative Analysis of Network Connectivity: CAVE-style or Head-Mounted Display?",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539620/13rRUwcS1D0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384536",
"title": "Examining Rotation Gain in CAVE-like Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384536/13rRUxOdD2H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030381",
"title": "Autocalibration of Multiprojector CAVE-Like Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030381/13rRUy0qnLF",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/01/08821384",
"title": "A Compelling Virtual Tour of the Dunhuang Cave With an Immersive Head-Mounted Display",
"doi": null,
"abstractUrl": "/magazine/cg/2020/01/08821384/1eTOS0wFeY8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09222121",
"title": "<italic>SAniHead:</italic> Sketching Animal-Like 3D Character Heads Using a View-Surface Collaborative Mesh Generative Network",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09222121/1nTqKKPoy5i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cds/2020/7106/0/710600a377",
"title": "A virtual environment making method for CAVE system",
"doi": null,
"abstractUrl": "/proceedings-article/cds/2020/710600a377/1pqa4RCdUAg",
"parentPublication": {
"id": "proceedings/cds/2020/7106/0",
"title": "2020 International Conference on Computing and Data Science (CDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552241",
"title": "<italic>COVID</italic>-view: Diagnosis of COVID-19 using Chest CT",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552241/1xic6RdmNC8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09845414",
"articleId": "1Fu4IsDCz72",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09850404",
"articleId": "1Fz4SEQnoiY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FvJy8Nb6xO",
"name": "ttg555501-09847102s1-supp1-3193672.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09847102s1-supp1-3193672.mp4",
"extension": "mp4",
"size": "98.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Fp5UcDqu3K",
"doi": "10.1109/TVCG.2022.3195111",
"abstract": "In a future of pervasive augmented reality (AR), AR systems will need to be able to efficiently draw or guide the attention of the user to visual points of interest in their physical-virtual environment. Since AR imagery is overlaid on top of the user's view of their physical environment, these attention guidance techniques must not only compete with other virtual imagery, but also with distracting or attention-grabbing features in the user's physical environment. Because of the wide range of physical-virtual environments that pervasive AR users will find themselves in, it is difficult to design visual cues that “pop out” to the user without performing a visual analysis of the user's environment, and changing the appearance of the cue to stand out from its surroundings. In this paper, we present an initial investigation into the potential uses of dichoptic visual cues for optical see-through AR displays, specifically cues that involve having a difference in hue, saturation, or value between the user's eyes. These types of cues have been shown to be preattentively processed by the user when presented on other stereoscopic displays, and may also be an effective method of drawing user attention on optical see-through AR displays. We present two user studies: one that evaluates the saliency of dichoptic visual cues on optical see-through displays, and one that evaluates their subjective qualities. Our results suggest that hue-based dichoptic cues or “Forbidden Colors” may be particularly effective for these purposes, achieving significantly lower error rates in a pop out task compared to value-based and saturation-based cues.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In a future of pervasive augmented reality (AR), AR systems will need to be able to efficiently draw or guide the attention of the user to visual points of interest in their physical-virtual environment. Since AR imagery is overlaid on top of the user's view of their physical environment, these attention guidance techniques must not only compete with other virtual imagery, but also with distracting or attention-grabbing features in the user's physical environment. Because of the wide range of physical-virtual environments that pervasive AR users will find themselves in, it is difficult to design visual cues that “pop out” to the user without performing a visual analysis of the user's environment, and changing the appearance of the cue to stand out from its surroundings. In this paper, we present an initial investigation into the potential uses of dichoptic visual cues for optical see-through AR displays, specifically cues that involve having a difference in hue, saturation, or value between the user's eyes. These types of cues have been shown to be preattentively processed by the user when presented on other stereoscopic displays, and may also be an effective method of drawing user attention on optical see-through AR displays. We present two user studies: one that evaluates the saliency of dichoptic visual cues on optical see-through displays, and one that evaluates their subjective qualities. Our results suggest that hue-based dichoptic cues or “Forbidden Colors” may be particularly effective for these purposes, achieving significantly lower error rates in a pop out task compared to value-based and saturation-based cues.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In a future of pervasive augmented reality (AR), AR systems will need to be able to efficiently draw or guide the attention of the user to visual points of interest in their physical-virtual environment. Since AR imagery is overlaid on top of the user's view of their physical environment, these attention guidance techniques must not only compete with other virtual imagery, but also with distracting or attention-grabbing features in the user's physical environment. Because of the wide range of physical-virtual environments that pervasive AR users will find themselves in, it is difficult to design visual cues that “pop out” to the user without performing a visual analysis of the user's environment, and changing the appearance of the cue to stand out from its surroundings. In this paper, we present an initial investigation into the potential uses of dichoptic visual cues for optical see-through AR displays, specifically cues that involve having a difference in hue, saturation, or value between the user's eyes. These types of cues have been shown to be preattentively processed by the user when presented on other stereoscopic displays, and may also be an effective method of drawing user attention on optical see-through AR displays. We present two user studies: one that evaluates the saliency of dichoptic visual cues on optical see-through displays, and one that evaluates their subjective qualities. Our results suggest that hue-based dichoptic cues or “Forbidden Colors” may be particularly effective for these purposes, achieving significantly lower error rates in a pop out task compared to value-based and saturation-based cues.",
"title": "Analysis of the Saliency of Color-Based Dichoptic Cues in Optical See-Through Augmented Reality",
"normalizedTitle": "Analysis of the Saliency of Color-Based Dichoptic Cues in Optical See-Through Augmented Reality",
"fno": "09844860",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Observers",
"Image Color Analysis",
"Optical Saturation",
"Optical Imaging",
"Color",
"Augmented Reality",
"Optical See Through Displays",
"Visual Perception",
"Attention Cues",
"Preattentive Cues",
"Dichoptic Cues",
"Human Computer Interaction HCI"
],
"authors": [
{
"givenName": "Austin",
"surname": "Erickson",
"fullName": "Austin Erickson",
"affiliation": "University of Central Florida, Orlando, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerd",
"surname": "Bruder",
"fullName": "Gerd Bruder",
"affiliation": "University of Central Florida, Orlando, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gregory F.",
"surname": "Welch",
"fullName": "Gregory F. Welch",
"affiliation": "University of Central Florida, Orlando, FL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08302393",
"title": "Driver Behavior and Performance with Augmented Reality Pedestrian Collision Warning: An Outdoor User Study",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08302393/13rRUwInvJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a213",
"title": "Evaluating Visual Cues for Future Airborne Surveillance Using Simulated Augmented Reality Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a213/1CJbS2QCX5e",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10077744",
"title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10077744/1LH8EZ3NEGI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09293392",
"title": "A Perceptual Color-Matching Method for Examining Color Blending in Augmented Reality Head-Up Display Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09293392/1pyomiXbJQs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a115",
"title": "Perceived Transparency in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a115/1yeQLPBHFBe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09839681",
"articleId": "1FisL8u19du",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09845414",
"articleId": "1Fu4IsDCz72",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Fu4I7GI7TO",
"name": "ttg555501-09844860s1-supp1-3195111.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09844860s1-supp1-3195111.mp4",
"extension": "mp4",
"size": "56.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
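The record above describes hue-, saturation-, and value-based dichoptic cues, i.e., a controlled per-eye difference along one HSV channel. As a rough illustration of that idea only (this is not the authors' stimulus code; the function name and the offset value are assumptions), a left/right color pair can be derived by shifting a single HSV channel of a base color in opposite directions for the two eyes:

```python
import colorsys

def make_dichoptic_pair(base_rgb, channel="hue", offset=0.15):
    """Return (left_rgb, right_rgb): the base color shifted in opposite
    directions along one HSV channel, one shift per eye."""
    h, s, v = colorsys.rgb_to_hsv(*base_rgb)
    pair = []
    for sign in (+1, -1):                       # opposite shifts for the two eyes
        hsv = {"hue": h, "saturation": s, "value": v}
        if channel == "hue":
            hsv["hue"] = (h + sign * offset) % 1.0   # hue is circular, so wrap
        else:
            hsv[channel] = min(1.0, max(0.0, hsv[channel] + sign * offset))
        pair.append(colorsys.hsv_to_rgb(hsv["hue"], hsv["saturation"], hsv["value"]))
    return tuple(pair)

# Example: a hue-based cue pair from a light blue base color (RGB in [0, 1]).
left, right = make_dichoptic_pair((0.2, 0.6, 0.9), channel="hue")
print(left, right)
```

When two such images are fused binocularly, a hue offset of this kind corresponds to the “Forbidden Colors” percept that the abstract reports may be particularly effective.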
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FisKWeqz8Q",
"doi": "10.1109/TVCG.2022.3193756",
"abstract": "Information visualization uses various types of representations to encode data into graphical formats. Prior work on visualization techniques has evaluated the accuracy of perceived numerical data values from visual data encodings such as graphical position, length, orientation, size, and color. Our work aims to extend the research of graphical perception to the use of motion as data encodings for quantitative values. We present two experiments implementing multiple fundamental aspects of motion such as type, speed, and synchronicity that can be used for numerical value encoding as well as comparing motion to static visual encodings in terms of user perception and accuracy. We studied how well users can assess the differences between several types of motion and static visual encodings and present an updated ranking of accuracy for quantitative judgments. Our results indicate that non-synchronized motion can be interpreted more quickly and more accurately than synchronized motion. Moreover, our ranking of static and motion visual representations shows that motion, especially expansion and translational types, has great potential as a data encoding technique for quantitative value. Finally, we discuss the implications for the use of animation and motion for numerical representations in data visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Information visualization uses various types of representations to encode data into graphical formats. Prior work on visualization techniques has evaluated the accuracy of perceived numerical data values from visual data encodings such as graphical position, length, orientation, size, and color. Our work aims to extend the research of graphical perception to the use of motion as data encodings for quantitative values. We present two experiments implementing multiple fundamental aspects of motion such as type, speed, and synchronicity that can be used for numerical value encoding as well as comparing motion to static visual encodings in terms of user perception and accuracy. We studied how well users can assess the differences between several types of motion and static visual encodings and present an updated ranking of accuracy for quantitative judgments. Our results indicate that non-synchronized motion can be interpreted more quickly and more accurately than synchronized motion. Moreover, our ranking of static and motion visual representations shows that motion, especially expansion and translational types, has great potential as a data encoding technique for quantitative value. Finally, we discuss the implications for the use of animation and motion for numerical representations in data visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Information visualization uses various types of representations to encode data into graphical formats. Prior work on visualization techniques has evaluated the accuracy of perceived numerical data values from visual data encodings such as graphical position, length, orientation, size, and color. Our work aims to extend the research of graphical perception to the use of motion as data encodings for quantitative values. We present two experiments implementing multiple fundamental aspects of motion such as type, speed, and synchronicity that can be used for numerical value encoding as well as comparing motion to static visual encodings in terms of user perception and accuracy. We studied how well users can assess the differences between several types of motion and static visual encodings and present an updated ranking of accuracy for quantitative judgments. Our results indicate that non-synchronized motion can be interpreted more quickly and more accurately than synchronized motion. Moreover, our ranking of static and motion visual representations shows that motion, especially expansion and translational types, has great potential as a data encoding technique for quantitative value. Finally, we discuss the implications for the use of animation and motion for numerical representations in data visualization.",
"title": "Evaluating Graphical Perception of Visual Motion for Quantitative Data Encoding",
"normalizedTitle": "Evaluating Graphical Perception of Visual Motion for Quantitative Data Encoding",
"fno": "09839572",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Encoding",
"Visualization",
"Data Visualization",
"Animation",
"Image Color Analysis",
"Task Analysis",
"Synchronization",
"Animation And Motion Related Techniques",
"Empirical Study",
"Evaluation",
"Graphical Perception",
"Information Visualization"
],
"authors": [
{
"givenName": "Shaghayegh",
"surname": "Esmaeili",
"fullName": "Shaghayegh Esmaeili",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Samia",
"surname": "Kabir",
"fullName": "Samia Kabir",
"affiliation": "Purdue University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anthony M.",
"surname": "Colas",
"fullName": "Anthony M. Colas",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rhema P.",
"surname": "Linder",
"fullName": "Rhema P. Linder",
"affiliation": "University of Tennessee, Knoxville, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric D.",
"surname": "Ragan",
"fullName": "Eric D. Ragan",
"affiliation": "University of Florida, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2002/1751/0/01173146",
"title": "Graphical encoding for information visualization: an empirical study",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2002/01173146/12OmNrMZpGn",
"parentPublication": {
"id": "proceedings/ieee-infovis/2002/1751/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2002/1751/0/01173146",
"title": "Graphical encoding for information visualization: an empirical study",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2002/01173146/12OmNz2kqqP",
"parentPublication": {
"id": "proceedings/infvis/2002/1751/0",
"title": "Proceedings InfoVis 2002. IEEE Symposium on Information Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017638",
"title": "Assessing the Graphical Perception of Time and Speed on 2D+Time Trajectories",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017638/13rRUx0xPTV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07875127",
"title": "Evaluating Interactive Graphical Encodings for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07875127/13rRUxly9e0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122631",
"title": "Graphical Overlays: Using Layered Elements to Aid Chart Reading",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122631/13rRUyfKIHJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440806",
"title": "Evaluating ‘Graphical Perception’ with CNNs",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440806/17D45We0UEy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09978718",
"title": "The Risks of Ranking: Revisiting Graphical Perception to Model Individual Differences in Visualization Performance",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09978718/1IXUnbRdUEE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809678",
"title": "Investigating Direct Manipulation of Graphical Encodings as a Method for User Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809678/1cHEi01VEYg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836120",
"title": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836120/1dia2KVa7g4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222295",
"title": "Modeling the Influence of Visual Density on Cluster Perception in Scatterplots Using Topology",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222295/1nTqtC45a12",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09837104",
"articleId": "1FbOBmxWmRi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09839681",
"articleId": "1FisL8u19du",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FvJyKqrPK8",
"name": "ttg555501-09839572s1-supp1-3193756.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09839572s1-supp1-3193756.mp4",
"extension": "mp4",
"size": "19.8 MB",
"__typename": "WebExtraType"
},
{
"id": "1FvJyzSaqVq",
"name": "ttg555501-09839572s1-supp2-3193756.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09839572s1-supp2-3193756.mp4",
"extension": "mp4",
"size": "10.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
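The motion-encoding study above maps a numeric value onto properties of motion such as speed, and controls whether marks move in or out of phase. The following is a toy sketch of that mapping, not the paper's stimulus code: the oscillation amplitude, speed range, and function name are all illustrative assumptions. A value is encoded as translational oscillation speed, and random per-mark phases produce the non-synchronized condition:

```python
import math
import random

def mark_offset(value, t, v_min=0.0, v_max=1.0,
                max_speed=2.0, amplitude=10.0, phase=0.0):
    """Horizontal offset (in pixels) of an oscillating mark at time t:
    the encoded value sets the angular speed of the oscillation."""
    speed = max_speed * (value - v_min) / (v_max - v_min)
    return amplitude * math.sin(speed * t + phase)

values = [0.2, 0.5, 0.9]
# Random per-mark phases give the non-synchronized condition; phase=0 for
# every mark would give the synchronized one.
phases = [random.uniform(0.0, 2.0 * math.pi) for _ in values]
for t in (0.0, 0.5, 1.0):
    offsets = [round(mark_offset(v, t, phase=p), 2) for v, p in zip(values, phases)]
    print(f"t={t:.1f}s offsets={offsets}")
```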
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FisL8u19du",
"doi": "10.1109/TVCG.2022.3193406",
"abstract": "Learning-based surface reconstruction methods have received considerable attention in recent years due to their excellent expressiveness. However, existing learning-based methods lack scalability in processing large-scale point clouds. This paper proposes a novel scalable learning-based 3D surface reconstruction method based on octree, called SSRNet. SSRNet works in a scalable reconstruction pipeline, which divides oriented point clouds into different local parts and then processes them in parallel. Accommodating this scalable design pattern, SSRNet constructs local geometric features for octree vertices. Such features comprise the relation between the vertices and the implicit surface, ensuring geometric perception. Focusing on local geometric information also enables the network to avoid the overfitting problem and generalize well on different datasets. Finally, as a learning-based method, SSRNet can process large-scale point clouds in a short time. And to further solve the efficiency problem, we provide a lightweight and efficient version that is about five times faster while maintaining reconstruction performance. Experiments show that our methods achieve state-of-the-art performance with outstanding efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Learning-based surface reconstruction methods have received considerable attention in recent years due to their excellent expressiveness. However, existing learning-based methods lack scalability in processing large-scale point clouds. This paper proposes a novel scalable learning-based 3D surface reconstruction method based on octree, called SSRNet. SSRNet works in a scalable reconstruction pipeline, which divides oriented point clouds into different local parts and then processes them in parallel. Accommodating this scalable design pattern, SSRNet constructs local geometric features for octree vertices. Such features comprise the relation between the vertices and the implicit surface, ensuring geometric perception. Focusing on local geometric information also enables the network to avoid the overfitting problem and generalize well on different datasets. Finally, as a learning-based method, SSRNet can process large-scale point clouds in a short time. And to further solve the efficiency problem, we provide a lightweight and efficient version that is about five times faster while maintaining reconstruction performance. Experiments show that our methods achieve state-of-the-art performance with outstanding efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Learning-based surface reconstruction methods have received considerable attention in recent years due to their excellent expressiveness. However, existing learning-based methods lack scalability in processing large-scale point clouds. This paper proposes a novel scalable learning-based 3D surface reconstruction method based on octree, called SSRNet. SSRNet works in a scalable reconstruction pipeline, which divides oriented point clouds into different local parts and then processes them in parallel. Accommodating this scalable design pattern, SSRNet constructs local geometric features for octree vertices. Such features comprise the relation between the vertices and the implicit surface, ensuring geometric perception. Focusing on local geometric information also enables the network to avoid the overfitting problem and generalize well on different datasets. Finally, as a learning-based method, SSRNet can process large-scale point clouds in a short time. And to further solve the efficiency problem, we provide a lightweight and efficient version that is about five times faster while maintaining reconstruction performance. Experiments show that our methods achieve state-of-the-art performance with outstanding efficiency.",
"title": "SSRNet: Scalable 3D Surface Reconstruction Network",
"normalizedTitle": "SSRNet: Scalable 3D Surface Reconstruction Network",
"fno": "09839681",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Surface Reconstruction",
"Point Cloud Compression",
"Octrees",
"Surface Treatment",
"Learning Systems",
"Three Dimensional Displays",
"Reconstruction Algorithms",
"Surface Reconstruction",
"Implicit Function",
"Oriented Point Clouds",
"Large Scale Point Clouds"
],
"authors": [
{
"givenName": "Ganzhangqin",
"surname": "Yuan",
"fullName": "Ganzhangqin Yuan",
"affiliation": "National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qiancheng",
"surname": "Fu",
"fullName": "Qiancheng Fu",
"affiliation": "National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhenxing",
"surname": "Mi",
"fullName": "Zhenxing Mi",
"affiliation": "Hong Kong University of Science and Technology, Kowloon Tong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiming",
"surname": "Luo",
"fullName": "Yiming Luo",
"affiliation": "Imperial College London, London, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenbing",
"surname": "Tao",
"fullName": "Wenbing Tao",
"affiliation": "National Key Laboratory of Science and Technology on Multispectral Information Processing, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460082",
"title": "Harnessing self-similarity for reconstruction of large missing regions in 3D Models",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460082/12OmNBlofQO",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a157",
"title": "Incremental Division of Very Large Point Clouds for Scalable 3D Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a157/12OmNvT2p0I",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c501",
"title": "Scalable Surface Reconstruction from Point Clouds with Extreme Scale and Density Diversity",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c501/12OmNwpXRXO",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b341",
"title": "Global, Dense Multiscale Reconstruction for a Billion Points",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b341/12OmNzBOiik",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/05/ttg2011050669",
"title": "Data-Parallel Octrees for Surface Reconstruction",
"doi": null,
"abstractUrl": "/journal/tg/2011/05/ttg2011050669/13rRUxCitJ9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0/08291962",
"title": "Integrated Quality Mesh Generation for Poisson Surface Reconstruction in HPC Applications",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2017/08291962/17D45VsBTYE",
"parentPublication": {
"id": "proceedings/hpcc-smartcity-dss/2017/2588/0",
"title": "2017 IEEE 19th International Conference on High Performance Computing and Communications; IEEE 15th International Conference on Smart City; IEEE 3rd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f621",
"title": "Out-of-Core Surface Reconstruction via Global TGV Minimization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f621/1BmFe562Y9O",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10093999",
"title": "ANISE: Assembly-based Neural Implicit Surface rEconstruction",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10093999/1M80HueHnJS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300k0122",
"title": "Deep Geometric Prior for Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300k0122/1gyrJvG8Kt2",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a967",
"title": "SSRNet: Scalable 3D Surface Reconstruction Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a967/1m3nKc80MlG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09839572",
"articleId": "1FisKWeqz8Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09844860",
"articleId": "1Fp5UcDqu3K",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
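SSRNet's scalability, as described above, comes from dividing an oriented point cloud into local parts organized by an octree and processing them in parallel. Below is a minimal sketch of that partitioning step only; it is an illustrative re-implementation under assumptions (the leaf-size threshold, depth cap, and function name are not from the paper):

```python
import numpy as np

def octree_partition(points, center, half, max_points=4096, depth=0, leaves=None):
    """Recursively split a point cloud into octree leaves holding at most
    max_points points each; returns a list of per-leaf point arrays."""
    if leaves is None:
        leaves = []
    if len(points) <= max_points or depth >= 20:   # depth cap guards duplicate points
        if len(points):
            leaves.append(points)
        return leaves
    # Octant code per point: one bit per axis, relative to the cell center.
    code = ((points[:, 0] > center[0]).astype(int) * 4
            + (points[:, 1] > center[1]).astype(int) * 2
            + (points[:, 2] > center[2]).astype(int))
    for octant in range(8):
        mask = code == octant
        if mask.any():
            sign = np.array([1.0 if octant & b else -1.0 for b in (4, 2, 1)])
            octree_partition(points[mask], center + 0.5 * half * sign,
                             0.5 * half, max_points, depth + 1, leaves)
    return leaves

pts = np.random.default_rng(0).random((100_000, 3))   # synthetic cloud in [0, 1]^3
parts = octree_partition(pts, center=np.full(3, 0.5), half=0.5)
print(len(parts), sum(len(p) for p in parts))          # local chunks, all points kept
```

Each leaf is small enough to be handed to an independent worker, which is what makes the divide-then-process-in-parallel pipeline scale to large clouds.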
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FapOsLgEik",
"doi": "10.1109/TVCG.2022.3192364",
"abstract": "Immersive visualization in (VR) allows us to exploit visual cues for perception in 3D space, yet few existing studies have measured the effects of visual cues. Across a desktop monitor and a head-mounted display (HMD), we assessed scatterplot designs which vary their use of visual cues—motion, shading, perspective (graphical projection), and dimensionality—on two sets of data. We conducted a user study with a summary task in which 32 participants estimated the classification accuracy of an artificial from the scatterplots. With Bayesian multilevel modeling, we capture the intricate visual effects and find that no cue alone explains all the variance in estimation error. Visual motion cues generally reduce participants’ estimation error; besides this motion, using other cues may increase participants’ estimation error. Using an HMD, adding visual motion cues, providing a third data dimension, or showing a more complicated dataset leads to longer response times. We speculate that most visual cues may not strongly affect perception in immersive analytics unless they change people's mental model about data. In summary, by studying participants as they interpret the output from a complicated machine learning model, we advance our understanding of how to use the visual cues in immersive analytics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive visualization in (VR) allows us to exploit visual cues for perception in 3D space, yet few existing studies have measured the effects of visual cues. Across a desktop monitor and a head-mounted display (HMD), we assessed scatterplot designs which vary their use of visual cues—motion, shading, perspective (graphical projection), and dimensionality—on two sets of data. We conducted a user study with a summary task in which 32 participants estimated the classification accuracy of an artificial from the scatterplots. With Bayesian multilevel modeling, we capture the intricate visual effects and find that no cue alone explains all the variance in estimation error. Visual motion cues generally reduce participants’ estimation error; besides this motion, using other cues may increase participants’ estimation error. Using an HMD, adding visual motion cues, providing a third data dimension, or showing a more complicated dataset leads to longer response times. We speculate that most visual cues may not strongly affect perception in immersive analytics unless they change people's mental model about data. In summary, by studying participants as they interpret the output from a complicated machine learning model, we advance our understanding of how to use the visual cues in immersive analytics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive visualization in (VR) allows us to exploit visual cues for perception in 3D space, yet few existing studies have measured the effects of visual cues. Across a desktop monitor and a head-mounted display (HMD), we assessed scatterplot designs which vary their use of visual cues—motion, shading, perspective (graphical projection), and dimensionality—on two sets of data. We conducted a user study with a summary task in which 32 participants estimated the classification accuracy of an artificial from the scatterplots. With Bayesian multilevel modeling, we capture the intricate visual effects and find that no cue alone explains all the variance in estimation error. Visual motion cues generally reduce participants’ estimation error; besides this motion, using other cues may increase participants’ estimation error. Using an HMD, adding visual motion cues, providing a third data dimension, or showing a more complicated dataset leads to longer response times. We speculate that most visual cues may not strongly affect perception in immersive analytics unless they change people's mental model about data. In summary, by studying participants as they interpret the output from a complicated machine learning model, we advance our understanding of how to use the visual cues in immersive analytics.",
"title": "Visual Cue Effects on a Classification Accuracy Estimation Task in Immersive Scatterplots",
"normalizedTitle": "Visual Cue Effects on a Classification Accuracy Estimation Task in Immersive Scatterplots",
"fno": "09834145",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Data Visualization",
"Three Dimensional Displays",
"Neural Networks",
"Monitoring",
"Training",
"Virtual Reality",
"Cluster Perception",
"Information Visualization",
"Immersive Analytics",
"Dimension Reduction",
"Classification"
],
"authors": [
{
"givenName": "Fumeng",
"surname": "Yang",
"fullName": "Fumeng Yang",
"affiliation": "Northwestern University, Evanston, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Tompkin",
"fullName": "James Tompkin",
"affiliation": "Department of Computer Science, Brown University, Providence, RI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lane",
"surname": "Harrison",
"fullName": "Lane Harrison",
"affiliation": "Department of Computer Science, Worcester Polytechnic Institute, Worcester, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David H.",
"surname": "Laidlaw",
"fullName": "David H. Laidlaw",
"affiliation": "Department of Computer Science, Brown University, Providence, RI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446261",
"title": "The Influence of Avatar Representation and Behavior on Communication in Social Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446261/13bd1gCd7T2",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447558",
"title": "Immersive Visualization of Abstract Information: An Evaluation on Dimensionally-Reduced Data Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447558/13bd1tMztYK",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a093",
"title": "Exploring the Design Space for Immersive Embodiment in Dance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a093/1CJc1vWLV6w",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a369",
"title": "Spatial Perception in Immersive Visualization: A Study and Findings",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a369/1J7Waia4UXm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797978",
"title": "IATK: An Immersive Analytics Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797978/1cJ0GweguUo",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08854316",
"title": "Evaluating an Immersive Space-Time Cube Geovisualization for Intuitive Trajectory Data Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08854316/1dM2fkHbAVa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933547",
"title": "Evidence for Area as the Primary Visual Cue in Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933547/1fTgFhkepQk",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222346",
"title": "Shared Surfaces and Spaces: Collaborative Data Visualisation in a Co-located Immersive Environment",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222346/1nTqW9mGTrG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222098",
"title": "Embodied Navigation in Immersive Abstract Data Visualization: Is Overview+Detail or Zooming Better for 3D Scatterplots?",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222098/1nTrQ1hHyyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a502",
"title": "Visual Indicators for Monitoring Students in a VR class",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a502/1tnXkpvZfqg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09827962",
"articleId": "1EWSvmlatmU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09837104",
"articleId": "1FbOBmxWmRi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1FbOBMls0OQ",
"name": "ttg555501-09834145s1-supp1-3192364.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09834145s1-supp1-3192364.pdf",
"extension": "pdf",
"size": "8.54 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
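The study above relies on Bayesian multilevel modeling to separate individual differences from condition effects. The core mechanism is partial pooling: each participant's estimate is shrunk toward the group mean in proportion to how noisy it is. Here is a compact numpy sketch of that shrinkage on synthetic data; all shapes and variances are assumptions for illustration, and the paper's actual models are richer and fit by full posterior inference:

```python
import numpy as np

rng = np.random.default_rng(0)
n_participants, n_trials = 32, 20
sigma = 2.0                                    # within-person noise (assumed known)
true_level = rng.normal(0.0, 1.0, n_participants)       # latent per-person error level
errors = true_level[:, None] + rng.normal(0.0, sigma, (n_participants, n_trials))

grand_mean = errors.mean()
raw = errors.mean(axis=1)                      # no-pooling (per-person) estimates
tau2 = max(raw.var() - sigma**2 / n_trials, 1e-6)       # between-person variance
weight = (n_trials / sigma**2) / (n_trials / sigma**2 + 1.0 / tau2)
shrunk = weight * raw + (1.0 - weight) * grand_mean     # partial pooling

for r, s in zip(raw[:3], shrunk[:3]):
    print(f"raw {r:+.3f} -> pooled {s:+.3f}")
```

With many trials per person the pooling weight approaches 1 and the raw and pooled estimates agree; with few, noisy trials the estimates are pulled strongly toward the group mean, which is what keeps individual-difference claims conservative.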
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1FbOBmxWmRi",
"doi": "10.1109/TVCG.2022.3193018",
"abstract": "Tree modeling has been extensively studied in computer graphics. Recent advances in the development of high-resolution sensors and data processing techniques are extremely useful for collecting 3D datasets of real-world trees and generating increasingly plausible branching structures. The rapid development of versatile acquisition platforms allows us to capture multi-view images and scanned data that can be used for guided 3D tree modeling. In this paper, we carry out a comprehensive review of the state-of-the-art methods for the 3D modeling of botanical tree geometry by taking input data from real scenarios. A wide range of studies has been proposed following different approaches. To summarize the most relevant contributions, these have been classified into three categories: (1) procedural reconstruction, (2) geometry-based extraction, which is also divided into three groups (thinning, clustering and spanning tree refinement), and (3) image-based modeling. In addition, we describe other approaches aimed at improving the reconstruction process by adding additional features to achieve a realistic appearance of the tree models. Thus, we provide an overview of the most effective procedures to assist researchers in the photorealistic modeling of trees in geometry and appearance. The article concludes with remarks and trends for promising research opportunities in 3D tree modeling using real-world data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tree modeling has been extensively studied in computer graphics. Recent advances in the development of high-resolution sensors and data processing techniques are extremely useful for collecting 3D datasets of real-world trees and generating increasingly plausible branching structures. The rapid development of versatile acquisition platforms allows us to capture multi-view images and scanned data that can be used for guided 3D tree modeling. In this paper, we carry out a comprehensive review of the state-of-the-art methods for the 3D modeling of botanical tree geometry by taking input data from real scenarios. A wide range of studies has been proposed following different approaches. To summarize the most relevant contributions, these have been classified into three categories: (1) procedural reconstruction, (2) geometry-based extraction, which is also divided into three groups (thinning, clustering and spanning tree refinement), and (3) image-based modeling. In addition, we describe other approaches aimed at improving the reconstruction process by adding additional features to achieve a realistic appearance of the tree models. Thus, we provide an overview of the most effective procedures to assist researchers in the photorealistic modeling of trees in geometry and appearance. The article concludes with remarks and trends for promising research opportunities in 3D tree modeling using real-world data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tree modeling has been extensively studied in computer graphics. Recent advances in the development of high-resolution sensors and data processing techniques are extremely useful for collecting 3D datasets of real-world trees and generating increasingly plausible branching structures. The rapid development of versatile acquisition platforms allows us to capture multi-view images and scanned data that can be used for guided 3D tree modeling. In this paper, we carry out a comprehensive review of the state-of-the-art methods for the 3D modeling of botanical tree geometry by taking input data from real scenarios. A wide range of studies has been proposed following different approaches. To summarize the most relevant contributions, these have been classified into three categories: (1) procedural reconstruction, (2) geometry-based extraction, which is also divided into three groups (thinning, clustering and spanning tree refinement), and (3) image-based modeling. In addition, we describe other approaches aimed at improving the reconstruction process by adding additional features to achieve a realistic appearance of the tree models. Thus, we provide an overview of the most effective procedures to assist researchers in the photorealistic modeling of trees in geometry and appearance. The article concludes with remarks and trends for promising research opportunities in 3D tree modeling using real-world data.",
"title": "Modeling of the 3D Tree Skeleton using Real-World Data: A Survey",
"normalizedTitle": "Modeling of the 3D Tree Skeleton using Real-World Data: A Survey",
"fno": "09837104",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Vegetation",
"Three Dimensional Displays",
"Solid Modeling",
"Data Models",
"Skeleton",
"Image Reconstruction",
"Geometry",
"Tree Modeling",
"3 D Data",
"Real World Data Processing",
"Computational Geometry",
"Realistic Rendering"
],
"authors": [
{
"givenName": "José L.",
"surname": "Cárdenas-Donoso",
"fullName": "José L. Cárdenas-Donoso",
"affiliation": "Computer Graphics and Geomatics Lab, University of Jaén, ES, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Carlos J.",
"surname": "Ogayar",
"fullName": "Carlos J. Ogayar",
"affiliation": "Computer Graphics and Geomatics Lab, University of Jaén, ES, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Francisco R.",
"surname": "Feito",
"fullName": "Francisco R. Feito",
"affiliation": "Computer Graphics and Geomatics Lab, University of Jaén, ES, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Juan M.",
"surname": "Jurado",
"fullName": "Juan M. Jurado",
"affiliation": "Computer Graphics and Geomatics Lab, University of Jaén, ES, Spain",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-17",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2017/2818/0/2818a131",
"title": "Leveraging Tree Statistics for Extracting Anatomical Trees from 3D Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2017/2818a131/12OmNwCsdAO",
"parentPublication": {
"id": "proceedings/crv/2017/2818/0",
"title": "2017 14th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2014/4325/0/4325a324",
"title": "A 3D Authoring Method by Editing Real World Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2014/4325a324/12OmNya72sn",
"parentPublication": {
"id": "proceedings/cisis/2014/4325/0",
"title": "2014 Eighth International Conference on Complex, Intelligent and Software Intensive Systems (CISIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477596",
"title": "Measuring and modeling apple trees using time-of-flight data for automation of dormant pruning applications",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477596/12OmNzw8jeL",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/12/07368927",
"title": "Tree Modeling with Real Tree-Parts Examples",
"doi": null,
"abstractUrl": "/journal/tg/2016/12/07368927/13rRUxlgxTp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2021/0679/0/067900a055",
"title": "3D Forest-tree Modeling Approach Based on Loading Segment Models",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2021/067900a055/1CATsJrErhC",
"parentPublication": {
"id": "proceedings/itme/2021/0679/0",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/11/08744364",
"title": "Efficient Representation of Geometric Tree Models with Level-of-Detail Using Compressed 3D Chain Code",
"doi": null,
"abstractUrl": "/journal/tg/2020/11/08744364/1bmEPJMhdOo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icict/2020/7283/0/728300a091",
"title": "Simulate Forest Trees by Integrating L-System and 3D CAD Files",
"doi": null,
"abstractUrl": "/proceedings-article/icict/2020/728300a091/1jPb7TIJWXm",
"parentPublication": {
"id": "proceedings/icict/2020/7283/0",
"title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212896",
"title": "Interactive Modeling of Trees Using VR Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212896/1nHRRssduko",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2021/1967/0/196700a181",
"title": "Neural vision-based semantic 3D world modeling",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2021/196700a181/1sZ3qrWold6",
"parentPublication": {
"id": "proceedings/wacvw/2021/1967/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a826",
"title": "Mid-Air Finger Sketching for Tree Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a826/1tuBbGEUWm4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09834145",
"articleId": "1FapOsLgEik",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09839572",
"articleId": "1FisKWeqz8Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EWSvmlatmU",
"doi": "10.1109/TVCG.2022.3188940",
"abstract": "Both in digital and print media, it is common to use static maps to show the evolution of values in various regions over time. The ability to communicate local or global trends, while reducing the cognitive load on readers, is of vital importance for an audience that is not always well versed in map interpretation. This study aims to measure the efficiency of four static maps (choropleth, tile grid map and their banded versions) to test their usefulness in presenting changes over time from a user experience perspective. We first evaluate the effectiveness of these map types by quantitative performance analysis (time and success rates). In a second phase, we gather qualitative data to detect which type of map favors decision-making. On a quantitative level, our results show that certain types of maps work better to show global trends, while other types are more useful when analyzing regional trends or detecting the regions that fit a specific pattern. On a qualitative level, those representations which are already familiar to the user are often better valued despite having lower measured success rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Both in digital and print media, it is common to use static maps to show the evolution of values in various regions over time. The ability to communicate local or global trends, while reducing the cognitive load on readers, is of vital importance for an audience that is not always well versed in map interpretation. This study aims to measure the efficiency of four static maps (choropleth, tile grid map and their banded versions) to test their usefulness in presenting changes over time from a user experience perspective. We first evaluate the effectiveness of these map types by quantitative performance analysis (time and success rates). In a second phase, we gather qualitative data to detect which type of map favors decision-making. On a quantitative level, our results show that certain types of maps work better to show global trends, while other types are more useful when analyzing regional trends or detecting the regions that fit a specific pattern. On a qualitative level, those representations which are already familiar to the user are often better valued despite having lower measured success rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Both in digital and print media, it is common to use static maps to show the evolution of values in various regions over time. The ability to communicate local or global trends, while reducing the cognitive load on readers, is of vital importance for an audience that is not always well versed in map interpretation. This study aims to measure the efficiency of four static maps (choropleth, tile grid map and their banded versions) to test their usefulness in presenting changes over time from a user experience perspective. We first evaluate the effectiveness of these map types by quantitative performance analysis (time and success rates). In a second phase, we gather qualitative data to detect which type of map favors decision-making. On a quantitative level, our results show that certain types of maps work better to show global trends, while other types are more useful when analyzing regional trends or detecting the regions that fit a specific pattern. On a qualitative level, those representations which are already familiar to the user are often better valued despite having lower measured success rates.",
"title": "Measuring the Effectiveness of Static Maps to Communicate Changes over Time",
"normalizedTitle": "Measuring the Effectiveness of Static Maps to Communicate Changes over Time",
"fno": "09827962",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Visualization",
"Market Research",
"Media",
"Shape",
"Urban Areas",
"Time Measurement",
"Information Visualization",
"Cognition",
"Static Maps",
"User Interfaces",
"Perception"
],
"authors": [
{
"givenName": "Luz",
"surname": "Calvo",
"fullName": "Luz Calvo",
"affiliation": "CASE DepartmentBarcelona Supercomputing Center (BSC)",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fernando",
"surname": "Cucchietti",
"fullName": "Fernando Cucchietti",
"affiliation": "CASE DepartmentBarcelona Supercomputing Center (BSC)",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mario",
"surname": "Pérez-Montoro",
"fullName": "Mario Pérez-Montoro",
"affiliation": "Department of Library and Information Science and Audiovisual CommunicationUniversity of Barcelona",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2009/3733/0/3733a403",
"title": "Treemaps and Choropleth Maps Applied to Regional Hierarchical Statistical Data",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a403/12OmNBqv2d4",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2016/9036/0/9036a290",
"title": "Visualization of Statistics from MEDLINE®",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a290/12OmNqGA51k",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a978",
"title": "The Identification of Agglomeration Trends of Airport Foreign Firms",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a978/12OmNyo1o67",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2016/2846/0/07752344",
"title": "Detecting sex trafficking circuits in the U.S. through analysis of online escort advertisements",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752344/12OmNz6Apb1",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07536141",
"title": "Quantifying the Visual Impact of Classification Boundaries in Choropleth Maps",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07536141/13rRUxBa5c4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010060881",
"title": "Necklace Maps",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010060881/13rRUxjyX3S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020695",
"title": "The Impact of COVID-19 on Human Mobility: A Case Study on New York",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020695/1KfRasxXbpe",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a056",
"title": "ChoroLibre: Supporting Georeferenced Demographic Information Visualization Through Hierarchical Choropleth Maps",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a056/1cMEPB7kF1K",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933654",
"title": "Time Varying Predominance Tag Maps",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933654/1fTgI2WaxuE",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400c535",
"title": "GeoDart: A System for Discovering Maps Discrepancies",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400c535/1uGXBygAPjW",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09826389",
"articleId": "1EVdDTX0i2I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09834145",
"articleId": "1FapOsLgEik",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1EYxory3q1O",
"name": "ttg555501-09827962s1-supp1-3188940.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09827962s1-supp1-3188940.pdf",
"extension": "pdf",
"size": "1.16 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EVdDTX0i2I",
"doi": "10.1109/TVCG.2022.3189883",
"abstract": "Scatterplots are among the most widely used visualization techniques. Compelling scatterplot visualizations improve understanding of data by leveraging visual perception to boost awareness when performing specific visual analytic tasks. Design choices in scatterplots, such as graphical encodings or data aspects, can directly impact decision-making quality for low-level tasks like clustering. Hence, constructing frameworks that consider both the perceptions of the visual encodings and the task being performed enables optimizing visualizations to maximize efficacy. In this paper, we propose an automatic tool to optimize the design factors of scatterplots to reveal the most salient cluster structure. Our approach leverages the merge tree data structure to identify the clusters and optimize the choice of subsampling algorithm, sampling rate, marker size, and marker opacity used to generate a scatterplot image. We validate our approach with user and case studies that show it efficiently provides high-quality scatterplot designs from a large parameter space.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scatterplots are among the most widely used visualization techniques. Compelling scatterplot visualizations improve understanding of data by leveraging visual perception to boost awareness when performing specific visual analytic tasks. Design choices in scatterplots, such as graphical encodings or data aspects, can directly impact decision-making quality for low-level tasks like clustering. Hence, constructing frameworks that consider both the perceptions of the visual encodings and the task being performed enables optimizing visualizations to maximize efficacy. In this paper, we propose an automatic tool to optimize the design factors of scatterplots to reveal the most salient cluster structure. Our approach leverages the merge tree data structure to identify the clusters and optimize the choice of subsampling algorithm, sampling rate, marker size, and marker opacity used to generate a scatterplot image. We validate our approach with user and case studies that show it efficiently provides high-quality scatterplot designs from a large parameter space.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scatterplots are among the most widely used visualization techniques. Compelling scatterplot visualizations improve understanding of data by leveraging visual perception to boost awareness when performing specific visual analytic tasks. Design choices in scatterplots, such as graphical encodings or data aspects, can directly impact decision-making quality for low-level tasks like clustering. Hence, constructing frameworks that consider both the perceptions of the visual encodings and the task being performed enables optimizing visualizations to maximize efficacy. In this paper, we propose an automatic tool to optimize the design factors of scatterplots to reveal the most salient cluster structure. Our approach leverages the merge tree data structure to identify the clusters and optimize the choice of subsampling algorithm, sampling rate, marker size, and marker opacity used to generate a scatterplot image. We validate our approach with user and case studies that show it efficiently provides high-quality scatterplot designs from a large parameter space.",
"title": "Automatic Scatterplot Design Optimization for Clustering Identification",
"normalizedTitle": "Automatic Scatterplot Design Optimization for Clustering Identification",
"fno": "09826389",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Encoding",
"Data Visualization",
"Clustering Algorithms",
"Design Optimization",
"Image Color Analysis",
"Scatterplot",
"Overdraw",
"Clustering",
"Design Optimization",
"Perception",
"Topological Data Analysis"
],
"authors": [
{
"givenName": "Ghulam Jilani",
"surname": "Quadri",
"fullName": "Ghulam Jilani Quadri",
"affiliation": "Department of Computer Science, University of North Carolina, Chapel Hill",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jennifer Adorno",
"surname": "Nieves",
"fullName": "Jennifer Adorno Nieves",
"affiliation": "Department of Computer Science and Engineering, University of South Florida, Tampa",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brenton M.",
"surname": "Wiernik",
"fullName": "Brenton M. Wiernik",
"affiliation": "Department of Psychology, University of South Florida, Tampa",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paul",
"surname": "Rosen",
"fullName": "Paul Rosen",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2012/11/ttg2012111969",
"title": "3D Scatterplot Navigation",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111969/13rRUB6Sq0y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122634",
"title": "Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122634/13rRUEgs2BW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101768",
"title": "The Generalized Sensitivity Scatterplot",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101768/13rRUwbs2gs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/06/mcg2010060003",
"title": "Improved Scatterplot Design",
"doi": null,
"abstractUrl": "/magazine/cg/2010/06/mcg2010060003/13rRUwjoNCc",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017602",
"title": "Scatterplots: Tasks, Data, and Designs",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017602/13rRUy3gn7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/09/07332976",
"title": "The Connected Scatterplot for Presenting Paired Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2016/09/07332976/13rRUyY294F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09925049",
"title": "Dual Space Coupling Model Guided Overlap-Free Scatterplot",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09925049/1HBHYSHqD3a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a082",
"title": "Scatterplot Summarization by Constructing Fast and Robust Principal Graphs from Skeletons",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a082/1cMF8150We4",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222038",
"title": "Kyrix-S: Authoring Scalable Scatterplot Visualizations of Big Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222038/1nTq1lYLbEY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222295",
"title": "Modeling the Influence of Visual Density on Cluster Perception in Scatterplots Using Topology",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222295/1nTqtC45a12",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09825626",
"articleId": "1EQeZhjlQDm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09827962",
"articleId": "1EWSvmlatmU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EQeZhjlQDm",
"doi": "10.1109/TVCG.2022.3189569",
"abstract": "Origami architecture (OA) is a fascinating papercraft that involves only a piece of paper with cuts and folds. Interesting geometric structures ‘pop up’ when the paper is opened. However, manually designing such a physically valid 2D paper pop-up plan is challenging since fold lines must jointly satisfy hard spatial constraints. Existing works on automatic OA-style paper pop-up design all focused on how to generate a pop-up structure that approximates a given target 3D model. This paper presents the first OA-style paper pop-up design framework that takes 2D images instead of 3D models as input. Our work is inspired by the fact that artists often use 2D profiles to guide the design process, thus benefited from the high availability of 2D image resources. Due to the lack of 3D geometry information, we perform novel theoretic analysis to ensure the foldability and stability of the resultant design. Based on a novel graph representation of the paper pop-up plan, we further propose a practical optimization algorithm via mixed-integer programming that jointly optimizes the topology and geometry of the 2D plan. We also allow the user to interactively explore the design space by specifying constraints on fold lines. Finally, we evaluate our framework on various images with interesting 2D shapes. Experiments and comparisons exhibit both the efficacy and efficiency of our framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Origami architecture (OA) is a fascinating papercraft that involves only a piece of paper with cuts and folds. Interesting geometric structures ‘pop up’ when the paper is opened. However, manually designing such a physically valid 2D paper pop-up plan is challenging since fold lines must jointly satisfy hard spatial constraints. Existing works on automatic OA-style paper pop-up design all focused on how to generate a pop-up structure that approximates a given target 3D model. This paper presents the first OA-style paper pop-up design framework that takes 2D images instead of 3D models as input. Our work is inspired by the fact that artists often use 2D profiles to guide the design process, thus benefited from the high availability of 2D image resources. Due to the lack of 3D geometry information, we perform novel theoretic analysis to ensure the foldability and stability of the resultant design. Based on a novel graph representation of the paper pop-up plan, we further propose a practical optimization algorithm via mixed-integer programming that jointly optimizes the topology and geometry of the 2D plan. We also allow the user to interactively explore the design space by specifying constraints on fold lines. Finally, we evaluate our framework on various images with interesting 2D shapes. Experiments and comparisons exhibit both the efficacy and efficiency of our framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Origami architecture (OA) is a fascinating papercraft that involves only a piece of paper with cuts and folds. Interesting geometric structures ‘pop up’ when the paper is opened. However, manually designing such a physically valid 2D paper pop-up plan is challenging since fold lines must jointly satisfy hard spatial constraints. Existing works on automatic OA-style paper pop-up design all focused on how to generate a pop-up structure that approximates a given target 3D model. This paper presents the first OA-style paper pop-up design framework that takes 2D images instead of 3D models as input. Our work is inspired by the fact that artists often use 2D profiles to guide the design process, thus benefited from the high availability of 2D image resources. Due to the lack of 3D geometry information, we perform novel theoretic analysis to ensure the foldability and stability of the resultant design. Based on a novel graph representation of the paper pop-up plan, we further propose a practical optimization algorithm via mixed-integer programming that jointly optimizes the topology and geometry of the 2D plan. We also allow the user to interactively explore the design space by specifying constraints on fold lines. Finally, we evaluate our framework on various images with interesting 2D shapes. Experiments and comparisons exhibit both the efficacy and efficiency of our framework.",
"title": "Image-Based OA-style Paper Pop-up Design via Mixed-Integer Programming",
"normalizedTitle": "Image-Based OA-style Paper Pop-up Design via Mixed-Integer Programming",
"fno": "09825626",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Shape",
"Stability Analysis",
"Computational Modeling",
"Geometry",
"Solid Modeling",
"Optimization",
"Origami Architecture",
"Paper Pop Up",
"Image Based Design",
"Foldable Structure",
"Mixed Integer Programming"
],
"authors": [
{
"givenName": "Fei",
"surname": "Huang",
"fullName": "Fei Huang",
"affiliation": "Department of Computer Science, University of Bath, Bath, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Liu",
"fullName": "Chen Liu",
"affiliation": "Meta - Facebook Reality Labs, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai-Wen",
"surname": "Hsiao",
"fullName": "Kai-Wen Hsiao",
"affiliation": "Department of Computer Science, National Tsing Hua University, HsinChu, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying-Miao",
"surname": "Kuo",
"fullName": "Ying-Miao Kuo",
"affiliation": "Department of Computer Science, National Tsing Hua University, HsinChu, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hung-Kuo",
"surname": "Chu",
"fullName": "Hung-Kuo Chu",
"affiliation": "Department of Computer Science, National Tsing Hua University, HsinChu, Taiwan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong-Liang",
"surname": "Yang",
"fullName": "Yong-Liang Yang",
"affiliation": "Department of Computer Science, University of Bath, Bath, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130294",
"title": "Revisiting 3D geometric models for accurate object shape and pose",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130294/12OmNAOsMKB",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a927",
"title": "Single Image Pop-Up from Discriminatively Learned Parts",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a927/12OmNxX3uwA",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/02/ttg2014020276",
"title": "Surface and contour-preserving origamic architecture paper pop-ups",
"doi": null,
"abstractUrl": "/journal/tg/2014/02/ttg2014020276/13rRUxYrbUH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111795",
"title": "Automatic Paper Sliceform Design from 3D Solid Models",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111795/13rRUxlgy3H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d917",
"title": "PoP-Net: Pose over Parts Network for Multi-Person 3D Pose Estimation from a Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d917/1B13ndNQ3N6",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2019/5527/0/552700a404",
"title": "A Framework for the Conversion of Textual BigData into 2D Architectural Floor Plan",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2019/552700a404/1fHjJjSRij6",
"parentPublication": {
"id": "proceedings/bigmm/2019/5527/0",
"title": "2019 IEEE Fifth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2016/5188/0/5188a036",
"title": "Human Body Reshaping Based on Images",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2016/5188a036/1fw1QAw1Vle",
"parentPublication": {
"id": "proceedings/icvrv/2016/5188/0",
"title": "2016 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2020/7081/0/708100a797",
"title": "Study on Performance Optimization of Typical Area with Pop-Up Hood System",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2020/708100a797/1iERLXp8zdu",
"parentPublication": {
"id": "proceedings/icmtma/2020/7081/0",
"title": "2020 12th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274002",
"title": "Pop-up Makerspace Module in Undergraduate Studies Inculcating Entrepreneurial Mindset",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274002/1phRNShZ3Z6",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a223",
"title": "Pop’n Food: 3D Food Model Estimation System from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a223/1xPsodJpVfi",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09817818",
"articleId": "1EOA9Th02TS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09826389",
"articleId": "1EVdDTX0i2I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1EWSw804Jqw",
"name": "ttg555501-09825626s1-supp1-3189569.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09825626s1-supp1-3189569.mp4",
"extension": "mp4",
"size": "38.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EOA9Th02TS",
"doi": "10.1109/TVCG.2022.3189094",
"abstract": "Relationships in scientific data, such as the numerical and spatial distribution relations of features in univariate data, the scalar-value combinations’ relations in multivariate data, and the association of volumes in time-varying and ensemble data, are intricate and complex. This paper presents voxel2vec, a novel unsupervised representation learning model, which is used to learn distributed representations of scalar values/scalar-value combinations in a low-dimensional vector space. Its basic assumption is that if two scalar values/scalar-value combinations have similar contexts, they usually have high similarity in terms of features. By representing scalar values/scalar-value combinations as symbols, voxel2vec learns the similarity between them in the context of spatial distribution and then allows us to explore the overall association between volumes by transfer prediction. We demonstrate the usefulness and effectiveness of voxel2vec by comparing it with the isosurface similarity map of univariate data and applying the learned distributed representations to feature classification for multivariate data and to association analysis for time-varying and ensemble data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Relationships in scientific data, such as the numerical and spatial distribution relations of features in univariate data, the scalar-value combinations’ relations in multivariate data, and the association of volumes in time-varying and ensemble data, are intricate and complex. This paper presents voxel2vec, a novel unsupervised representation learning model, which is used to learn distributed representations of scalar values/scalar-value combinations in a low-dimensional vector space. Its basic assumption is that if two scalar values/scalar-value combinations have similar contexts, they usually have high similarity in terms of features. By representing scalar values/scalar-value combinations as symbols, voxel2vec learns the similarity between them in the context of spatial distribution and then allows us to explore the overall association between volumes by transfer prediction. We demonstrate the usefulness and effectiveness of voxel2vec by comparing it with the isosurface similarity map of univariate data and applying the learned distributed representations to feature classification for multivariate data and to association analysis for time-varying and ensemble data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Relationships in scientific data, such as the numerical and spatial distribution relations of features in univariate data, the scalar-value combinations’ relations in multivariate data, and the association of volumes in time-varying and ensemble data, are intricate and complex. This paper presents voxel2vec, a novel unsupervised representation learning model, which is used to learn distributed representations of scalar values/scalar-value combinations in a low-dimensional vector space. Its basic assumption is that if two scalar values/scalar-value combinations have similar contexts, they usually have high similarity in terms of features. By representing scalar values/scalar-value combinations as symbols, voxel2vec learns the similarity between them in the context of spatial distribution and then allows us to explore the overall association between volumes by transfer prediction. We demonstrate the usefulness and effectiveness of voxel2vec by comparing it with the isosurface similarity map of univariate data and applying the learned distributed representations to feature classification for multivariate data and to association analysis for time-varying and ensemble data.",
"title": "voxel2vec: A Natural Language Processing Approach to Learning Distributed Representations for Scientific Data",
"normalizedTitle": "voxel2vec: A Natural Language Processing Approach to Learning Distributed Representations for Scientific Data",
"fno": "09817818",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Feature Extraction",
"Distributed Databases",
"Symbols",
"Representation Learning",
"Isosurfaces",
"Computational Modeling",
"Graphical Models",
"Scientific Data",
"Representation Learning",
"Feature Classification",
"Association Analysis"
],
"authors": [
{
"givenName": "Xiangyang",
"surname": "He",
"fullName": "Xiangyang He",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yubo",
"surname": "Tao",
"fullName": "Yubo Tao",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shuoliu",
"surname": "Yang",
"fullName": "Shuoliu Yang",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haoran",
"surname": "Dai",
"fullName": "Haoran Dai",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hai",
"surname": "Lin",
"fullName": "Hai Lin",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/annes/1995/7174/0/71740373",
"title": "Integrating vision processing and natural language processing with a clinical application",
"doi": null,
"abstractUrl": "/proceedings-article/annes/1995/71740373/12OmNB9bvlj",
"parentPublication": {
"id": "proceedings/annes/1995/7174/0",
"title": "Proceedings 1995 Second New Zealand International Two-Stream Conference on Artificial Neural Networks and Expert Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1992/2897/0/00235215",
"title": "Display of scientific data structures for algorithm visualization",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1992/00235215/12OmNC8MsJ0",
"parentPublication": {
"id": "proceedings/visual/1992/2897/0",
"title": "Proceedings Visualization '92",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031590",
"title": "Statistical visualization and analysis of large data using a value-based spatial distribution",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031590/12OmNCbCrVX",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192697",
"title": "Association Analysis for Visual Exploration of Multivariate Scientific Data Sets",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192697/13rRUNvgz9S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/1994/01/x1035",
"title": "Guest Editor's Introduction: Natural-Language Processing",
"doi": null,
"abstractUrl": "/magazine/ex/1994/01/x1035/13rRUwcAqna",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/1994/02/x2045",
"title": "Using Natural-Language Processing to Produce Weather Forecasts",
"doi": null,
"abstractUrl": "/magazine/ex/1994/02/x2045/13rRUxBJhzA",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2019/01/08395074",
"title": "Natural Language Processing for EHR-Based Computational Phenotyping",
"doi": null,
"abstractUrl": "/journal/tb/2019/01/08395074/17D45X2fUFf",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600k0524",
"title": "Weakly Paired Associative Learning for Sound and Image Representations via Bimodal Associative Memory",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600k0524/1H0O3inBfNu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823605",
"title": "Biclusters Based Visual Exploration of Multivariate Scientific Data",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823605/1d5kxtrWtBm",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400a433",
"title": "Semantic Representations for Multilingual Natural Language Processing",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400a433/1jdE0ge2Ryw",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09815871",
"articleId": "1EMV6Kftb2g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09825626",
"articleId": "1EQeZhjlQDm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1EWSvysOhNK",
"name": "ttg555501-09817818s1-supp1-3189094.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09817818s1-supp1-3189094.pdf",
"extension": "pdf",
"size": "3.12 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EMV6Kftb2g",
"doi": "10.1109/TVCG.2022.3188775",
"abstract": "The level of detail (LOD) technique has been widely exploited as a key rendering optimization in many graphics applications. Numerous approaches have been proposed to automatically generate different kinds of LODs, such as geometric LOD or shader LOD. However, none of them have considered simplifying the geometry and shader at the same time. In this paper, we explore the observation that simplifications of geometric and shading details can be combined to provide a greater variety of tradeoffs between performance and quality. We present a new discrete multiresolution representation of objects, which consists of mesh and shader LODs. Each level of the representation could contain both simplified representations of shader and mesh. To create such LODs, we propose two automatic algorithms that pursue the best simplifications of meshes and shaders at adaptively selected distances. The results show that our mesh and shader LOD achieves better performance-quality tradeoffs than prior LOD representations, such as those that only consider simplified meshes or shaders.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The level of detail (LOD) technique has been widely exploited as a key rendering optimization in many graphics applications. Numerous approaches have been proposed to automatically generate different kinds of LODs, such as geometric LOD or shader LOD. However, none of them have considered simplifying the geometry and shader at the same time. In this paper, we explore the observation that simplifications of geometric and shading details can be combined to provide a greater variety of tradeoffs between performance and quality. We present a new discrete multiresolution representation of objects, which consists of mesh and shader LODs. Each level of the representation could contain both simplified representations of shader and mesh. To create such LODs, we propose two automatic algorithms that pursue the best simplifications of meshes and shaders at adaptively selected distances. The results show that our mesh and shader LOD achieves better performance-quality tradeoffs than prior LOD representations, such as those that only consider simplified meshes or shaders.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The level of detail (LOD) technique has been widely exploited as a key rendering optimization in many graphics applications. Numerous approaches have been proposed to automatically generate different kinds of LODs, such as geometric LOD or shader LOD. However, none of them have considered simplifying the geometry and shader at the same time. In this paper, we explore the observation that simplifications of geometric and shading details can be combined to provide a greater variety of tradeoffs between performance and quality. We present a new discrete multiresolution representation of objects, which consists of mesh and shader LODs. Each level of the representation could contain both simplified representations of shader and mesh. To create such LODs, we propose two automatic algorithms that pursue the best simplifications of meshes and shaders at adaptively selected distances. The results show that our mesh and shader LOD achieves better performance-quality tradeoffs than prior LOD representations, such as those that only consider simplified meshes or shaders.",
"title": "Automatic Mesh and Shader Level of Detail",
"normalizedTitle": "Automatic Mesh and Shader Level of Detail",
"fno": "09815871",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Optimization",
"Measurement",
"Geometry",
"Real Time Systems",
"Computational Modeling",
"Task Analysis",
"Geometry Simplification",
"Level Of Detail",
"Real Time Rendering",
"Shader Optimization"
],
"authors": [
{
"givenName": "Yuzhi",
"surname": "Liang",
"fullName": "Yuzhi Liang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qi",
"surname": "Song",
"fullName": "Qi Song",
"affiliation": "Booming Tech, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Wang",
"fullName": "Rui Wang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuchi",
"surname": "Huo",
"fullName": "Yuchi Huo",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480772",
"title": "New Rendering Approach for Composable Volumetric Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480772/12OmNBAqZId",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2016/4400/0/4400a267",
"title": "A Mesh Reconstruction Method Based on View Maps",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2016/4400a267/12OmNBoNrqY",
"parentPublication": {
"id": "proceedings/icdh/2016/4400/0",
"title": "2016 6th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532787",
"title": "A shader-based parallel rendering framework",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532787/12OmNs0C9UL",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispass/2018/5010/0/501001a219",
"title": "A Cross-platform Evaluation of Graphics Shader Compiler Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/ispass/2018/501001a219/12OmNvA1hxj",
"parentPublication": {
"id": "proceedings/ispass/2018/5010/0",
"title": "2018 IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097916",
"title": "The design of LLVM-based shader compiler for embedded architecture",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097916/12OmNwDSdNg",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2011/4439/0/06032330",
"title": "On Extracting Perception-Based Features for Effective Similar Shader Retreival",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2011/06032330/12OmNxGj9NV",
"parentPublication": {
"id": "proceedings/compsac/2011/4439/0",
"title": "2011 IEEE 35th Annual Computer Software and Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/2003/1991/0/19910466",
"title": "Streaming Transmission of Point-Sampled Geometry Based on View-Dependent Level-of-Detail",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/2003/19910466/12OmNyFCw1d",
"parentPublication": {
"id": "proceedings/3dim/2003/1991/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475454",
"title": "Dynamic Shader Generation for Flexible Multi-Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475454/12OmNyfdOUy",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/clei/2018/0437/0/043700a599",
"title": "Shader Framework Implementation for the Management of Multiple Effects",
"doi": null,
"abstractUrl": "/proceedings-article/clei/2018/043700a599/1cdP3WMvO4U",
"parentPublication": {
"id": "proceedings/clei/2018/0437/0",
"title": "2018 XLIV Latin American Computer Conference (CLEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1353",
"title": "Neural Geometric Level of Detail: Real-time Rendering with Implicit 3D Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1353/1yeInvm2ApO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09814874",
"articleId": "1EJBn7YxwGY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09817818",
"articleId": "1EOA9Th02TS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1EOAaMFkNEs",
"name": "ttg555501-09815871s1-supp2-3188775.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09815871s1-supp2-3188775.mp4",
"extension": "mp4",
"size": "215 MB",
"__typename": "WebExtraType"
},
{
"id": "1EOAamH2oKs",
"name": "ttg555501-09815871s1-supp1-3188775.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09815871s1-supp1-3188775.pdf",
"extension": "pdf",
"size": "219 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EJBn7YxwGY",
"doi": "10.1109/TVCG.2022.3187425",
"abstract": "We present Target Netgrams as a visualization technique for radial layouts of graphs. Inspired by manually created target sociograms, we propose an annulus-constrained stress model that aims to position nodes onto the annuli between adjacent circles for indicating their radial hierarchy, while maintaining the network structure (clusters and neighborhoods) and improving readability as much as possible. This is achieved by having more space on the annuli than traditional layout techniques. By adapting stress majorization to this model, the layout is computed as a constrained least square optimization problem. Additional constraints (e.g., parent-child preservation, attribute-based clusters and structure-aware radii) are provided for exploring nodes, edges, and levels of interest. We demonstrate the effectiveness of our method through a comprehensive evaluation, a user study, and a case study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Target Netgrams as a visualization technique for radial layouts of graphs. Inspired by manually created target sociograms, we propose an annulus-constrained stress model that aims to position nodes onto the annuli between adjacent circles for indicating their radial hierarchy, while maintaining the network structure (clusters and neighborhoods) and improving readability as much as possible. This is achieved by having more space on the annuli than traditional layout techniques. By adapting stress majorization to this model, the layout is computed as a constrained least square optimization problem. Additional constraints (e.g., parent-child preservation, attribute-based clusters and structure-aware radii) are provided for exploring nodes, edges, and levels of interest. We demonstrate the effectiveness of our method through a comprehensive evaluation, a user study, and a case study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Target Netgrams as a visualization technique for radial layouts of graphs. Inspired by manually created target sociograms, we propose an annulus-constrained stress model that aims to position nodes onto the annuli between adjacent circles for indicating their radial hierarchy, while maintaining the network structure (clusters and neighborhoods) and improving readability as much as possible. This is achieved by having more space on the annuli than traditional layout techniques. By adapting stress majorization to this model, the layout is computed as a constrained least square optimization problem. Additional constraints (e.g., parent-child preservation, attribute-based clusters and structure-aware radii) are provided for exploring nodes, edges, and levels of interest. We demonstrate the effectiveness of our method through a comprehensive evaluation, a user study, and a case study.",
"title": "Target Netgrams: An Annulus-Constrained Stress Model for Radial Graph Visualization",
"normalizedTitle": "Target Netgrams: An Annulus-Constrained Stress Model for Radial Graph Visualization",
"fno": "09814874",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Stress",
"Visualization",
"Data Visualization",
"Computational Modeling",
"Adaptation Models",
"Task Analysis",
"Radial Visualization",
"Stress Model",
"Hierarchy Constraint",
"Graph"
],
"authors": [
{
"givenName": "Mingliang",
"surname": "Xue",
"fullName": "Mingliang Xue",
"affiliation": "Department of Computer Science, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yunhai",
"surname": "Wang",
"fullName": "Yunhai Wang",
"affiliation": "Department of Computer Science, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chang",
"surname": "Han",
"fullName": "Chang Han",
"affiliation": "Department of Computer Science, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhang",
"fullName": "Jian Zhang",
"affiliation": "Computer Network Information Center Chinese Academy of Sciences, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zheng",
"surname": "Wang",
"fullName": "Zheng Wang",
"affiliation": "China Information Consulting & Designing Institute Co., Ltd (CITC), Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kaiyi",
"surname": "Zhang",
"fullName": "Kaiyi Zhang",
"affiliation": "Department of Computer Science, Shandong University, Jinan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christophe",
"surname": "Hurter",
"fullName": "Christophe Hurter",
"affiliation": "ENAC, Ecole National de l’Aviation Civile, Toulouse, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhao",
"fullName": "Jian Zhao",
"affiliation": "Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "Deussen",
"fullName": "Oliver Deussen",
"affiliation": "Computer and Information Science, University of Konstanz, Konstanz, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-07-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2013/5049/0/5049a051",
"title": "Radial Layered Matrix Visualization of Dynamic Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2013/5049a051/12OmNxHJ9om",
"parentPublication": {
"id": "proceedings/iv/2013/5049/0",
"title": "2013 17th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcse/2011/1562/0/06041657",
"title": "Study on Lumbar Rotated and Localized Manipulation on Stress Based on Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/icfcse/2011/06041657/12OmNzwpU7O",
"parentPublication": {
"id": "proceedings/icfcse/2011/1562/0",
"title": "2011 International Conference on Future Computer Science and Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0821",
"title": "IPSep-CoLa: An Incremental Procedure for Separation Constraint Layout of Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0821/13rRUNvyat8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061399",
"title": "Stress Tensor Field Visualization for Implant Planning in Orthopedics",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061399/13rRUwI5U2B",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017634",
"title": "Revisiting Stress Majorization as a Unified Framework for Interactive Constrained Graph Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017634/13rRUxC0Sw3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2012/03/05703168",
"title": "A Framework for Layout-dependent STI Stress Analysis and Stress-aware Circuit Optimization",
"doi": null,
"abstractUrl": "/journal/si/2012/03/05703168/13rRUxNEqNc",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07889042",
"title": "Drawing Large Graphs by Multilevel Maxent-Stress Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07889042/13rRUxYINfn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a096",
"title": "Radial Calendar of Consumption",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a096/17D45XvMcd7",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807238",
"title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807238/1cG66qf6MKs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a409",
"title": "Optimizing a radial visualization with a genetic algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a409/1rSRd8jh960",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09806341",
"articleId": "1Et0iwB480M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09815871",
"articleId": "1EMV6Kftb2g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Et0iwB480M",
"doi": "10.1109/TVCG.2022.3186146",
"abstract": "Cryo-electron tomography (cryo-ET) is a new 3D imaging technique with unprecedented potential for resolving submicron structural details. Existing volume visualization methods, however, are not able to reveal details of interest due to low signal-to-noise ratio. In order to design more powerful transfer functions, we propose leveraging soft segmentation as an explicit component of visualization for noisy volumes. Our technical realization is based on semi-supervised learning, where we combine the advantages of two segmentation algorithms. First, the weak segmentation algorithm provides good results for propagating sparse user-provided labels to other voxels in the same volume and is used to generate dense pseudo-labels. Second, the powerful deep-learning-based segmentation algorithm learns from these pseudo-labels to generalize the segmentation to other unseen volumes, a task that the weak segmentation algorithm fails at completely. The proposed volume visualization uses deep-learning-based segmentation as a component for segmentation-aware transfer function design. Appropriate ramp parameters can be suggested automatically through frequency distribution analysis. Furthermore, our visualization uses gradient-free ambient occlusion shading to further suppress the visual presence of noise, and to give structural detail the desired prominence. The cryo-ET data studied in our technical experiments are based on the highest-quality tilted series of intact SARS-CoV-2 virions. Our technique shows the high impact in target sciences for visual data analysis of very noisy volumes that cannot be visualized with existing techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cryo-electron tomography (cryo-ET) is a new 3D imaging technique with unprecedented potential for resolving submicron structural details. Existing volume visualization methods, however, are not able to reveal details of interest due to low signal-to-noise ratio. In order to design more powerful transfer functions, we propose leveraging soft segmentation as an explicit component of visualization for noisy volumes. Our technical realization is based on semi-supervised learning, where we combine the advantages of two segmentation algorithms. First, the weak segmentation algorithm provides good results for propagating sparse user-provided labels to other voxels in the same volume and is used to generate dense pseudo-labels. Second, the powerful deep-learning-based segmentation algorithm learns from these pseudo-labels to generalize the segmentation to other unseen volumes, a task that the weak segmentation algorithm fails at completely. The proposed volume visualization uses deep-learning-based segmentation as a component for segmentation-aware transfer function design. Appropriate ramp parameters can be suggested automatically through frequency distribution analysis. Furthermore, our visualization uses gradient-free ambient occlusion shading to further suppress the visual presence of noise, and to give structural detail the desired prominence. The cryo-ET data studied in our technical experiments are based on the highest-quality tilted series of intact SARS-CoV-2 virions. Our technique shows the high impact in target sciences for visual data analysis of very noisy volumes that cannot be visualized with existing techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cryo-electron tomography (cryo-ET) is a new 3D imaging technique with unprecedented potential for resolving submicron structural details. Existing volume visualization methods, however, are not able to reveal details of interest due to low signal-to-noise ratio. In order to design more powerful transfer functions, we propose leveraging soft segmentation as an explicit component of visualization for noisy volumes. Our technical realization is based on semi-supervised learning, where we combine the advantages of two segmentation algorithms. First, the weak segmentation algorithm provides good results for propagating sparse user-provided labels to other voxels in the same volume and is used to generate dense pseudo-labels. Second, the powerful deep-learning-based segmentation algorithm learns from these pseudo-labels to generalize the segmentation to other unseen volumes, a task that the weak segmentation algorithm fails at completely. The proposed volume visualization uses deep-learning-based segmentation as a component for segmentation-aware transfer function design. Appropriate ramp parameters can be suggested automatically through frequency distribution analysis. Furthermore, our visualization uses gradient-free ambient occlusion shading to further suppress the visual presence of noise, and to give structural detail the desired prominence. The cryo-ET data studied in our technical experiments are based on the highest-quality tilted series of intact SARS-CoV-2 virions. Our technique shows the high impact in target sciences for visual data analysis of very noisy volumes that cannot be visualized with existing techniques.",
"title": "Finding Nano-Ötzi: Cryo-Electron Tomography Visualization Guided by Learned Segmentation",
"normalizedTitle": "Finding Nano-Ötzi: Cryo-Electron Tomography Visualization Guided by Learned Segmentation",
"fno": "09806341",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Image Segmentation",
"Visualization",
"Task Analysis",
"Three Dimensional Displays",
"Signal To Noise Ratio",
"Noise Measurement",
"Volume Rendering",
"Computer Graphics Techniques",
"Machine Learning Techniques",
"Scalar Field Data",
"Life Sciences"
],
"authors": [
{
"givenName": "Ngan",
"surname": "Nguyen",
"fullName": "Ngan Nguyen",
"affiliation": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ciril",
"surname": "Bohak",
"fullName": "Ciril Bohak",
"affiliation": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dominik",
"surname": "Engel",
"fullName": "Dominik Engel",
"affiliation": "Ulm University, Ulm, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Mindek",
"fullName": "Peter Mindek",
"affiliation": "TU Wien and Nanographics GmbH, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ondrej",
"surname": "Strnad",
"fullName": "Ondrej Strnad",
"affiliation": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Wonka",
"fullName": "Peter Wonka",
"affiliation": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sai",
"surname": "Li",
"fullName": "Sai Li",
"affiliation": "Tsinghua University School of Life Sciences, Bejing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Ulm University, Ulm, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ivan",
"surname": "Viola",
"fullName": "Ivan Viola",
"affiliation": "King Abdullah University of Science and Technology, Thuwal, Saudi Arabia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2009/3900/0/3900a384",
"title": "Modified Simultaneous Algebraic Reconstruction Technique and its Parallelization in Cryo-electron Tomography",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2009/3900a384/12OmNyS6RAw",
"parentPublication": {
"id": "proceedings/icpads/2009/3900/0",
"title": "Parallel and Distributed Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2018/5488/0/08621363",
"title": "Feature Decomposition Based Saliency Detection in Electron Cryo-Tomograms",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2018/08621363/17D45XoXP4S",
"parentPublication": {
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669318",
"title": "Tracing Filaments in Simulated 3D Cryo-Electron Tomography Maps Using a Fast Dynamic Programming Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669318/1A9VqrWnZPG",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d260",
"title": "Weakly Supervised Learning for Joint Image Denoising and Protein Localization in Cryo-Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d260/1B13OFswsow",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/2.812E41",
"title": "Self-Supervised Cryo-Electron Tomography Volumetric Image Restoration from Single Noisy Volume with Sparsity Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/2.812E41/1BmL53XVH0c",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2021/2471/0/09762209",
"title": "Practical Analysis of Macromolecule Identity from Cryo-electron Tomography Images using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2021/09762209/1CT9aP80A1i",
"parentPublication": {
"id": "proceedings/aipr/2021/2471/0",
"title": "2021 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09994879",
"title": "DeepTracer-Denoising: Deep Learning for 3D Electron Density Map Denoising",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09994879/1JC247wbYSQ",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313180",
"title": "An Unsupervised Iterative Model for Single-Particle Cryo-EM Image Denoising Based on Siamese Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313180/1qmfINMKxTW",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313185",
"title": "Efficient Cryo-Electron Tomogram Simulation of Macromolecular Crowding with Application to SARS-CoV-2",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313185/1qmfUw4iPgA",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2022/01/09380401",
"title": "Macromolecules Structural Classification With a 3D Dilated Dense Network in Cryo-Electron Tomography",
"doi": null,
"abstractUrl": "/journal/tb/2022/01/09380401/1s2FZRmSST6",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09804851",
"articleId": "1ErlpBk8JBS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09814874",
"articleId": "1EJBn7YxwGY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Ey4sOHAOZy",
"name": "ttg555501-09806341s1-supp2-3186146.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09806341s1-supp2-3186146.mp4",
"extension": "mp4",
"size": "200 MB",
"__typename": "WebExtraType"
},
{
"id": "1Ey4td2Q1AA",
"name": "ttg555501-09806341s1-supp1-3186146.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09806341s1-supp1-3186146.pdf",
"extension": "pdf",
"size": "10.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ErlpBk8JBS",
"doi": "10.1109/TVCG.2022.3185247",
"abstract": "Point cloud shape completion plays a central role in diverse 3D vision and robotics applications. Early methods used to generate global shapes without local detail refinement. Current methods tend to leverage local features to preserve the observed geometric details. However, they usually adopt the convolutional architecture over the incomplete point cloud to extract local features to restore the diverse information of both latent shape skeleton and geometric details, where long-distance correlation among the skeleton and details is ignored. In this work, we present a coarse-to-fine completion framework, which makes full use of both neighboring and long-distance region cues for point cloud completion. Our network leverages a Skeleton-Detail Transformer, which contains cross-attention and self-attention layers, to fully explore the correlation from local patterns to global shape and utilize it to enhance the overall skeleton. Also, we propose a selective attention mechanism to save memory usage in the attention process without significantly affecting performance. We conduct extensive experiments on the ShapeNet dataset and real-scanned datasets. Qualitative and quantitative evaluations demonstrate that our proposed network outperforms current state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Point cloud shape completion plays a central role in diverse 3D vision and robotics applications. Early methods used to generate global shapes without local detail refinement. Current methods tend to leverage local features to preserve the observed geometric details. However, they usually adopt the convolutional architecture over the incomplete point cloud to extract local features to restore the diverse information of both latent shape skeleton and geometric details, where long-distance correlation among the skeleton and details is ignored. In this work, we present a coarse-to-fine completion framework, which makes full use of both neighboring and long-distance region cues for point cloud completion. Our network leverages a Skeleton-Detail Transformer, which contains cross-attention and self-attention layers, to fully explore the correlation from local patterns to global shape and utilize it to enhance the overall skeleton. Also, we propose a selective attention mechanism to save memory usage in the attention process without significantly affecting performance. We conduct extensive experiments on the ShapeNet dataset and real-scanned datasets. Qualitative and quantitative evaluations demonstrate that our proposed network outperforms current state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Point cloud shape completion plays a central role in diverse 3D vision and robotics applications. Early methods used to generate global shapes without local detail refinement. Current methods tend to leverage local features to preserve the observed geometric details. However, they usually adopt the convolutional architecture over the incomplete point cloud to extract local features to restore the diverse information of both latent shape skeleton and geometric details, where long-distance correlation among the skeleton and details is ignored. In this work, we present a coarse-to-fine completion framework, which makes full use of both neighboring and long-distance region cues for point cloud completion. Our network leverages a Skeleton-Detail Transformer, which contains cross-attention and self-attention layers, to fully explore the correlation from local patterns to global shape and utilize it to enhance the overall skeleton. Also, we propose a selective attention mechanism to save memory usage in the attention process without significantly affecting performance. We conduct extensive experiments on the ShapeNet dataset and real-scanned datasets. Qualitative and quantitative evaluations demonstrate that our proposed network outperforms current state-of-the-art methods.",
"title": "Point Cloud Completion Via Skeleton-Detail Transformer",
"normalizedTitle": "Point Cloud Completion Via Skeleton-Detail Transformer",
"fno": "09804851",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Skeleton",
"Shape",
"Transformers",
"Point Cloud Compression",
"Three Dimensional Displays",
"Correlation",
"Task Analysis",
"Point Cloud",
"Point Cloud Completion",
"Shape Completion"
],
"authors": [
{
"givenName": "Wenxiao",
"surname": "Zhang",
"fullName": "Wenxiao Zhang",
"affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huajian",
"surname": "zhou",
"fullName": "Huajian zhou",
"affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhen",
"surname": "Dong",
"fullName": "Zhen Dong",
"affiliation": "State Key Laboratory of Information Engineering in Surveying, Mapping and Remote Sensing, Wuhan University, Wuhan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jun",
"surname": "Liu",
"fullName": "Jun Liu",
"affiliation": "Singapore University of Technology and Design, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qingan",
"surname": "Yan",
"fullName": "Qingan Yan",
"affiliation": "InnoPeak Technology, Inc. 2479 E Bayshore Rd, Palo Alto, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chunxia",
"surname": "Xiao",
"fullName": "Chunxia Xiao",
"affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-14",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tp/2023/01/09735342",
"title": "PMP-Net++: Point Cloud Completion by Transformer-Enhanced Multi-Step Point Moving Paths",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09735342/1BLmVZBJX6o",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f479",
"title": "SnowflakeNet: Point Cloud Completion by Snowflake Point Deconvolution with Skip-Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f479/1BmL45zCYda",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859781",
"title": "Hierarchical Graph Convolutional Skeleton Transformer for Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859781/1G9DN3HTea4",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859772",
"title": "LGP-Net: Local Geometry Preserving Network for Point Cloud Completion",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859772/1G9EQKPLOpO",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600b716",
"title": "LAKe-Net: Topology-Aware Point Cloud Completion by Localizing Aligned Keypoints",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600b716/1H0Kwo5tABi",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09928787",
"title": "Snowflake Point Deconvolution for Point Cloud Completion and Generation With Skip-Transformer",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09928787/1HL9mk8rEKk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10015045",
"title": "CSDN: Cross-Modal Shape-Transfer Dual-Refinement Network for Point Cloud Completion",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10015045/1JR6dVW7wJi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsic/2022/5488/0/548800a159",
"title": "MLFT-Net: Point Cloud Completion Using Multi-Level Feature Transformer",
"doi": null,
"abstractUrl": "/proceedings-article/iscsic/2022/548800a159/1LvAmC051qo",
"parentPublication": {
"id": "proceedings/iscsic/2022/5488/0",
"title": "2022 6th International Symposium on Computer Science and Intelligent Control (ISCSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10106495",
"title": "Variational Relational Point Completion Network for Robust 3D Classification",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10106495/1MwAn9y4Ozu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i520",
"title": "Variational Relational Point Completion Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i520/1yeLNkSQJX2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09802694",
"articleId": "1Eo1x2xfhYs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09806341",
"articleId": "1Et0iwB480M",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Eo1x2xfhYs",
"doi": "10.1109/TVCG.2022.3184986",
"abstract": "Role-playing is widely used in many areas, such as psychotherapy and behavior change. However, few studies have explored the possible effects of playing multiple roles in a single role-playing process. We propose a new role-playing paradigm, called role-exchange playing, in which a user plays two opposite roles successively in the same simulated event for better cognitive enhancement. We designed an experiment with this novel role-exchange playing strategy in the immersive virtual environments; and school bullying was chosen as a scenario in this case. A total of 234 middle/high school students were enrolled in the mixed-design experiment. From the user study, we found that through role-exchange, students developed more morally correct opinions about bullying, as well as increased empathy and willingness to engage in supportive behavior. They also showed increased commitment to stopping bullying others. Our role-exchange paradigm could achieve a better effect than traditional role-playing methods in situations where participants have no prior experience associated with the roles they play. Therefore, using role-exchange playing in the immersive virtual environments to educate minors can help prevent them from bullying others in the real world. Our study indicates a positive significance in moral education of teenagers. Our role-exchange playing may have the potential to be extended to such applications as counseling, therapy, and crime prevention.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Role-playing is widely used in many areas, such as psychotherapy and behavior change. However, few studies have explored the possible effects of playing multiple roles in a single role-playing process. We propose a new role-playing paradigm, called role-exchange playing, in which a user plays two opposite roles successively in the same simulated event for better cognitive enhancement. We designed an experiment with this novel role-exchange playing strategy in the immersive virtual environments; and school bullying was chosen as a scenario in this case. A total of 234 middle/high school students were enrolled in the mixed-design experiment. From the user study, we found that through role-exchange, students developed more morally correct opinions about bullying, as well as increased empathy and willingness to engage in supportive behavior. They also showed increased commitment to stopping bullying others. Our role-exchange paradigm could achieve a better effect than traditional role-playing methods in situations where participants have no prior experience associated with the roles they play. Therefore, using role-exchange playing in the immersive virtual environments to educate minors can help prevent them from bullying others in the real world. Our study indicates a positive significance in moral education of teenagers. Our role-exchange playing may have the potential to be extended to such applications as counseling, therapy, and crime prevention.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Role-playing is widely used in many areas, such as psychotherapy and behavior change. However, few studies have explored the possible effects of playing multiple roles in a single role-playing process. We propose a new role-playing paradigm, called role-exchange playing, in which a user plays two opposite roles successively in the same simulated event for better cognitive enhancement. We designed an experiment with this novel role-exchange playing strategy in the immersive virtual environments; and school bullying was chosen as a scenario in this case. A total of 234 middle/high school students were enrolled in the mixed-design experiment. From the user study, we found that through role-exchange, students developed more morally correct opinions about bullying, as well as increased empathy and willingness to engage in supportive behavior. They also showed increased commitment to stopping bullying others. Our role-exchange paradigm could achieve a better effect than traditional role-playing methods in situations where participants have no prior experience associated with the roles they play. Therefore, using role-exchange playing in the immersive virtual environments to educate minors can help prevent them from bullying others in the real world. Our study indicates a positive significance in moral education of teenagers. Our role-exchange playing may have the potential to be extended to such applications as counseling, therapy, and crime prevention.",
"title": "Role-Exchange Playing: An Exploration of Role-Playing Effects for Anti-Bullying in Immersive Virtual Environments",
"normalizedTitle": "Role-Exchange Playing: An Exploration of Role-Playing Effects for Anti-Bullying in Immersive Virtual Environments",
"fno": "09802694",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Virtual Environments",
"Cognition",
"Medical Treatment",
"Ethics",
"Training",
"Psychology",
"Anti Bullying",
"Minor Education",
"Role Reversal",
"Role Exchange",
"Role Playing",
"Virtual Reality"
],
"authors": [
{
"givenName": "Xiang",
"surname": "Gu",
"fullName": "Xiang Gu",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheng",
"surname": "Li",
"fullName": "Sheng Li",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kangrui",
"surname": "Yi",
"fullName": "Kangrui Yi",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaojuan",
"surname": "Yang",
"fullName": "Xiaojuan Yang",
"affiliation": "Shandong Normal University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huiling",
"surname": "Liu",
"fullName": "Huiling Liu",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guoping",
"surname": "Wang",
"fullName": "Guoping Wang",
"affiliation": "Peking University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iat/2005/2416/0/24160516",
"title": "Role-based Rights in Arti.cial Social Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iat/2005/24160516/12OmNBd9T2b",
"parentPublication": {
"id": "proceedings/iat/2005/2416/0",
"title": "Proceedings. The 2005 IEEE/WIC/ACM International Conference on Intelligent Agent Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbi/2016/3231/1/3231a228",
"title": "Process Modelling as Serious Game: Design of a Role-Playing Game for a Corporate Training",
"doi": null,
"abstractUrl": "/proceedings-article/cbi/2016/3231a228/12OmNs59JRb",
"parentPublication": {
"id": "proceedings/cbi/2016/3231/2",
"title": "2016 IEEE 18th Conference on Business Informatics (CBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2016/9041/0/9041a148",
"title": "Teaching STEM through a Role-Playing Serious Game and Intelligent Pedagogical Agents",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a148/12OmNwpGgN1",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mas/2015/9828/0/9828a037",
"title": "A Social Network Analysis of a Massively Multi-player On-Line Role Playing Game",
"doi": null,
"abstractUrl": "/proceedings-article/mas/2015/9828a037/12OmNyuPLlj",
"parentPublication": {
"id": "proceedings/mas/2015/9828/0",
"title": "2015 4th International Conference on Modeling and Simulation (MAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718430",
"title": "Virtual Team Role Play Using Second Life for Teaching Business Process Concepts",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718430/12OmNywxlSe",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/reet/2010/8786/0/05633115",
"title": "Experiences of using role playing andwiki in requirements engineering course projects",
"doi": null,
"abstractUrl": "/proceedings-article/reet/2010/05633115/12OmNzX6cge",
"parentPublication": {
"id": "proceedings/reet/2010/8786/0",
"title": "2010 5th International Workshop on Requirements Engineering Education and Training (REET 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccat/2022/9069/0/906900a084",
"title": "Application of Role-Playing to Enhance Participation in Computer Network Technology Course",
"doi": null,
"abstractUrl": "/proceedings-article/ccat/2022/906900a084/1JZ3TfwlYT6",
"parentPublication": {
"id": "proceedings/ccat/2022/9069/0",
"title": "2022 International Conference on Computer Applications Technology (CCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a376",
"title": "Development and Exploration of Serious Games for Anti-bullying Education : ——The Experience of Developing the Software of Anti-bullying Serious Games",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a376/1KpBIXuCUq4",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsa-c/2023/6459/0/645900a171",
"title": "Role-playing software architecture styles",
"doi": null,
"abstractUrl": "/proceedings-article/icsa-c/2023/645900a171/1MBDgupz3fq",
"parentPublication": {
"id": "proceedings/icsa-c/2023/6459/0",
"title": "2023 IEEE 20th International Conference on Software Architecture Companion (ICSA-C)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09293401",
"title": "Self-Illusion: A Study on Cognition of Role-Playing in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09293401/1pyonpfZjoY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09801527",
"articleId": "1EmmQ2RjHbO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09804851",
"articleId": "1ErlpBk8JBS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1EECTcZ3c7C",
"name": "ttg555501-09802694s1-supp1-3184986.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09802694s1-supp1-3184986.mp4",
"extension": "mp4",
"size": "48.6 MB",
"__typename": "WebExtraType"
},
{
"id": "1EECRTPLbA4",
"name": "ttg555501-09802694s1-supp2-3184986.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09802694s1-supp2-3184986.pdf",
"extension": "pdf",
"size": "47.2 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EmmQ2RjHbO",
"doi": "10.1109/TVCG.2022.3184247",
"abstract": "While neural networks (NN) have been successfully applied to many NLP tasks, the way they function is often difficult to interpret. In this article, we focus on binary text classification via NNs and propose a new tool, which includes a visualization of the decision boundary and the distances of data elements to this boundary. This tool increases the interpretability of NN. Our approach uses two innovative views: (1) an overview of the text representation space and (2) a local view allowing data exploration around the decision boundary for various localities of this representation space. These views are integrated into a visual platform, EBBE-Text, which also contains state-of-the-art visualizations of NN representation spaces and several kinds of information obtained from the classification process. The various views are linked through numerous interactive functionalities that enable easy exploration of texts and classification results via the various complementary views. A user study shows the effectiveness of the visual encoding and a case study illustrates the benefits of using our tool for the analysis of the classifications obtained with several recent NNs and two datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While neural networks (NN) have been successfully applied to many NLP tasks, the way they function is often difficult to interpret. In this article, we focus on binary text classification via NNs and propose a new tool, which includes a visualization of the decision boundary and the distances of data elements to this boundary. This tool increases the interpretability of NN. Our approach uses two innovative views: (1) an overview of the text representation space and (2) a local view allowing data exploration around the decision boundary for various localities of this representation space. These views are integrated into a visual platform, EBBE-Text, which also contains state-of-the-art visualizations of NN representation spaces and several kinds of information obtained from the classification process. The various views are linked through numerous interactive functionalities that enable easy exploration of texts and classification results via the various complementary views. A user study shows the effectiveness of the visual encoding and a case study illustrates the benefits of using our tool for the analysis of the classifications obtained with several recent NNs and two datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While neural networks (NN) have been successfully applied to many NLP tasks, the way they function is often difficult to interpret. In this article, we focus on binary text classification via NNs and propose a new tool, which includes a visualization of the decision boundary and the distances of data elements to this boundary. This tool increases the interpretability of NN. Our approach uses two innovative views: (1) an overview of the text representation space and (2) a local view allowing data exploration around the decision boundary for various localities of this representation space. These views are integrated into a visual platform, EBBE-Text, which also contains state-of-the-art visualizations of NN representation spaces and several kinds of information obtained from the classification process. The various views are linked through numerous interactive functionalities that enable easy exploration of texts and classification results via the various complementary views. A user study shows the effectiveness of the visual encoding and a case study illustrates the benefits of using our tool for the analysis of the classifications obtained with several recent NNs and two datasets.",
"title": "EBBE-Text: Explaining Neural Networks by Exploring Text Classification Decision Boundaries",
"normalizedTitle": "EBBE-Text: Explaining Neural Networks by Exploring Text Classification Decision Boundaries",
"fno": "09801527",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Artificial Neural Networks",
"Data Visualization",
"Computational Modeling",
"Natural Language Processing",
"Predictive Models",
"Task Analysis",
"Deep Learning",
"Binary Text Classification",
"Decision Boundary",
"Deep Learning",
"Interpretability",
"Neural Networks",
"Representation Space",
"Visual Analytics"
],
"authors": [
{
"givenName": "Alexis",
"surname": "Delaforge",
"fullName": "Alexis Delaforge",
"affiliation": "LIRMM, University of Montpellier and the CNRS, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jérôme",
"surname": "Azé",
"fullName": "Jérôme Azé",
"affiliation": "LIRMM, University of Montpellier and the CNRS, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sandra",
"surname": "Bringay",
"fullName": "Sandra Bringay",
"affiliation": "LIRMM, the AMIS research group, Paul Valéry University of Montpellier, the University of Montpellier, and the CNRS, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caroline",
"surname": "Mollevi",
"fullName": "Caroline Mollevi",
"affiliation": "ICM, IDESP, INSERM, University of Montpellier, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arnaud",
"surname": "Sallaberry",
"fullName": "Arnaud Sallaberry",
"affiliation": "LIRMM, the AMIS research group, Paul Valéry University of Montpellier, the University of Montpellier, and the CNRS, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maximilien",
"surname": "Servajean",
"fullName": "Maximilien Servajean",
"affiliation": "LIRMM, the AMIS research group, Paul Valéry University of Montpellier, the University of Montpellier, and the CNRS, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-18",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2012/2559/0/06392671",
"title": "iSimp: A sentence simplification system for biomedicail text",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2012/06392671/12OmNyFU75f",
"parentPublication": {
"id": "proceedings/bibm/2012/2559/0",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbe/2021/0099/0/009900a035",
"title": "A Text Classification Method Based on Graph Attention Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icitbe/2021/009900a035/1AH7MM9a012",
"parentPublication": {
"id": "proceedings/icitbe/2021/0099/0",
"title": "2021 International Conference on Information Technology and Biomedical Engineering (ICITBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a404",
"title": "Text Semantic Representation Based on Knowledge Graph Correction",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a404/1FUVO1IgtPi",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnlp/2022/9544/0/954400a420",
"title": "Graph Convolutional Networks for Fast Text Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icnlp/2022/954400a420/1GNtp6dDqRG",
"parentPublication": {
"id": "proceedings/icnlp/2022/9544/0",
"title": "2022 4th International Conference on Natural Language Processing (ICNLP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn-s/2019/3028/0/302800a025",
"title": "Towards a Bayesian Approach for Assessing Fault Tolerance of Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsn-s/2019/302800a025/1cI5RM2ftcY",
"parentPublication": {
"id": "proceedings/dsn-s/2019/3028/0",
"title": "2019 49th Annual IEEE/IFIP International Conference on Dependable Systems and Networks – Supplemental Volume (DSN-S)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "letters/ca/2020/02/09234705",
"title": "Adapting In Situ Accelerators for Sparsity with Granular Matrix Reordering",
"doi": null,
"abstractUrl": "/journal/ca/2020/02/09234705/1oDXEHhZgek",
"parentPublication": {
"id": "letters/ca",
"title": "IEEE Computer Architecture Letters",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isca/2021/3333/0/333300a692",
"title": "ELSA: Hardware-Software Co-design for Efficient, Lightweight Self-Attention Mechanism in Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/isca/2021/333300a692/1vNjIaHwBLq",
"parentPublication": {
"id": "proceedings/isca/2021/3333/0",
"title": "2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ai/2022/05/09551764",
"title": "A Review of Text Style Transfer Using Deep Learning",
"doi": null,
"abstractUrl": "/journal/ai/2022/05/09551764/1xgx5fmhGzC",
"parentPublication": {
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c195",
"title": "A Peek Into the Reasoning of Neural Networks: Interpreting with Structural Visual Concepts",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c195/1yeMjVGljFK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2022/11/09674227",
"title": "Building High-Throughput Neural Architecture Search Workflows via a Decoupled Fitness Prediction Engine",
"doi": null,
"abstractUrl": "/journal/td/2022/11/09674227/1zYf6SHCZKE",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09798876",
"articleId": "1Eho8QXQucg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09802694",
"articleId": "1Eo1x2xfhYs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Eo1xJ74NEs",
"name": "ttg555501-09801527s1-supp1-3184247.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09801527s1-supp1-3184247.pdf",
"extension": "pdf",
"size": "218 kB",
"__typename": "WebExtraType"
},
{
"id": "1Eo1xBx14fC",
"name": "ttg555501-09801527s1-supp2-3184247.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09801527s1-supp2-3184247.mp4",
"extension": "mp4",
"size": "15.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1Eho8QXQucg",
"doi": "10.1109/TVCG.2022.3184047",
"abstract": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.",
"title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging",
"normalizedTitle": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging",
"fno": "09798876",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Spatial Resolution",
"Superresolution",
"Visualization",
"Image Reconstruction",
"Light Fields",
"Training",
"Light Field",
"Heterogeneous Imaging",
"Spatial Super Resolution",
"Pyramid Reconstruction"
],
"authors": [
{
"givenName": "Yeyao",
"surname": "Chen",
"fullName": "Yeyao Chen",
"affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gangyi",
"surname": "Jiang",
"fullName": "Gangyi Jiang",
"affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mei",
"surname": "Yu",
"fullName": "Mei Yu",
"affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haiyong",
"surname": "Xu",
"fullName": "Haiyong Xu",
"affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yo-Sung",
"surname": "Ho",
"fullName": "Yo-Sung Ho",
"affiliation": "School of Electrical Engineering and ComputerScience, Gwangju Institute of Science and Technology, Gwangju, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2014/5188/0/06831814",
"title": "Improving resolution and depth-of-field of light field cameras using a hybrid imaging system",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2014/06831814/12OmNyaoDEw",
"parentPublication": {
"id": "proceedings/iccp/2014/5188/0",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/05/08620368",
"title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2020/05/08620368/17D45Wt3Exc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09716806",
"title": "Disentangling Light Fields for Super-Resolution and Disparity Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09716806/1B5WzcrxgIM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859373",
"title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccbd/2022/5716/0/10079964",
"title": "Multiple Magnification Spatial Super-Resolution Network for Light Field Images Based on EPI Solid",
"doi": null,
"abstractUrl": "/proceedings-article/iccbd/2022/10079964/1LSP47RLwFq",
"parentPublication": {
"id": "proceedings/iccbd/2022/5716/0",
"title": "2022 5th International Conference on Computing and Big Data (ICCBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b804",
"title": "Light Field Super-Resolution: A Benchmark",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b804/1iTvo7kjJFm",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/12/09099445",
"title": "CrossNet++: Cross-Scale Large-Parallax Warping for Reference-Based Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tp/2021/12/09099445/1k7oyvQ9LzO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c257",
"title": "Light Field Spatial Super-Resolution via Deep Combinatorial Geometry Embedding and Structural Consistency Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c257/1m3npj9GAZa",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09448470",
"title": "Deep Spatial-Angular Regularization for Light Field Imaging, Denoising, and Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09448470/1ugE5vtunqo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0005",
"title": "Light Field Super-Resolution with Zero-Shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0005/1yeISN5Dx4c",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09797843",
"articleId": "1EfIX5LNd5e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09801527",
"articleId": "1EmmQ2RjHbO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Eo1xZZmf6g",
"name": "ttg555501-09798876s1-supp1-3184047.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09798876s1-supp1-3184047.pdf",
"extension": "pdf",
"size": "5.61 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EexleVk9kk",
"doi": "10.1109/TVCG.2022.3183264",
"abstract": "We present a registration method relying on geometric constraints extracted from parametric primitives contained in 3D parametric models. Our method solves the registration in closed-form from three line-to-line, line-to-plane or plane-to-plane correspondences. The approach either works with semantically segmented RGB-D scans of the scene or with the output of plane detection in common frameworks like ARKit and ARCore. Based on the primitives detected in the scene, we build a list of descriptors using the normals and centroids of all the found primitives, and match them against the pre-computed list of descriptors from the model in order to find the scene-to-model primitive correspondences. Finally, we use our closed-form solver to estimate the 6DOF transformation from three lines and one point, which we obtain from the parametric representations of the model and scene parametric primitives. Quantitative and qualitative experiments on synthetic and real-world data sets demonstrate the performance and robustness of our method. We show that it can be used to create <italic>compact world anchors</italic> for indoor localization in AR applications on mobile devices leveraging commercial SLAM capabilities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a registration method relying on geometric constraints extracted from parametric primitives contained in 3D parametric models. Our method solves the registration in closed-form from three line-to-line, line-to-plane or plane-to-plane correspondences. The approach either works with semantically segmented RGB-D scans of the scene or with the output of plane detection in common frameworks like ARKit and ARCore. Based on the primitives detected in the scene, we build a list of descriptors using the normals and centroids of all the found primitives, and match them against the pre-computed list of descriptors from the model in order to find the scene-to-model primitive correspondences. Finally, we use our closed-form solver to estimate the 6DOF transformation from three lines and one point, which we obtain from the parametric representations of the model and scene parametric primitives. Quantitative and qualitative experiments on synthetic and real-world data sets demonstrate the performance and robustness of our method. We show that it can be used to create <italic>compact world anchors</italic> for indoor localization in AR applications on mobile devices leveraging commercial SLAM capabilities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a registration method relying on geometric constraints extracted from parametric primitives contained in 3D parametric models. Our method solves the registration in closed-form from three line-to-line, line-to-plane or plane-to-plane correspondences. The approach either works with semantically segmented RGB-D scans of the scene or with the output of plane detection in common frameworks like ARKit and ARCore. Based on the primitives detected in the scene, we build a list of descriptors using the normals and centroids of all the found primitives, and match them against the pre-computed list of descriptors from the model in order to find the scene-to-model primitive correspondences. Finally, we use our closed-form solver to estimate the 6DOF transformation from three lines and one point, which we obtain from the parametric representations of the model and scene parametric primitives. Quantitative and qualitative experiments on synthetic and real-world data sets demonstrate the performance and robustness of our method. We show that it can be used to create compact world anchors for indoor localization in AR applications on mobile devices leveraging commercial SLAM capabilities.",
"title": "Compact World Anchors: Registration Using Parametric Primitives as Scene Description",
"normalizedTitle": "Compact World Anchors: Registration Using Parametric Primitives as Scene Description",
"fno": "09797054",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Simultaneous Localization And Mapping",
"Mathematical Models",
"Location Awareness",
"Cameras",
"Semantics",
"Data Models",
"3 D Registration",
"Augmented Reality",
"Camera Localization",
"Closed Form Method",
"Correspondence Problem"
],
"authors": [
{
"givenName": "Fernando",
"surname": "Reyes-Aviles",
"fullName": "Fernando Reyes-Aviles",
"affiliation": "VRVis Competence Center in Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Philipp",
"surname": "Fleck",
"fullName": "Philipp Fleck",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Clemens",
"surname": "Arth",
"fullName": "Clemens Arth",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2018/4886/0/488601a955",
"title": "Incremental Structural Modeling Based on Geometric and Statistical Analyses",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a955/12OmNBQTJft",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f612",
"title": "Convex Global 3D Registration with Lagrangian Duality",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f612/12OmNqyUUwV",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a456",
"title": "Texture-Aware SLAM Using Stereo Imagery and Inertial Information",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a456/12OmNx1IwdI",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/08/08006257",
"title": "Piecewise-Planar StereoScan: Sequential Structure and Motion Using Plane Primitives",
"doi": null,
"abstractUrl": "/journal/tp/2018/08/08006257/13rRUEgaru7",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a616",
"title": "Multi-planar Monocular Reconstruction of Manhattan Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a616/17D45XvMcbo",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/08/09055083",
"title": "DynamicSLAM: Leveraging Human Anchors for Ubiquitous Low-Overhead Indoor Localization",
"doi": null,
"abstractUrl": "/journal/tm/2021/08/09055083/1iHr5YtaDvy",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a959",
"title": "FC-vSLAM: Integrating Feature Credibility in Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a959/1qyxjlMBUYw",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09496211",
"title": "PlaneFusion: Real-Time Indoor Scene Reconstruction With Planar Prior",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09496211/1vyjumhb4ZO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsd/2021/2703/0/270300a024",
"title": "An efficient FPGA-based co-processor for feature point detection and tracking",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2021/270300a024/1xCbcVVffhK",
"parentPublication": {
"id": "proceedings/dsd/2021/2703/0",
"title": "2021 24th Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaa/2021/3730/0/373000a145",
"title": "Improved Loop Detection Method Based on ICP and NDT Registration Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icaa/2021/373000a145/1zL1Mh20NnG",
"parentPublication": {
"id": "proceedings/icaa/2021/3730/0",
"title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09793626",
"articleId": "1E5LEepCqTC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09797843",
"articleId": "1EfIX5LNd5e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
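The record above describes a solver that recovers a 6DOF pose in closed form from three primitive correspondences. As a hedged illustration of the plane-to-plane case only (the paper's actual solver works from three lines and one point, which this sketch does not reproduce), the following Python fragment aligns three corresponding plane normals with a Kabsch/SVD step and then recovers the translation from the plane offsets. All function names and the (unit normal, offset) plane encoding are assumptions made for this sketch.

```python
import numpy as np

def rotation_from_normals(scene_normals, model_normals):
    # Kabsch/SVD step: find R minimizing sum_i ||R n_s,i - n_m,i||^2
    # over corresponding unit normals (rows of the two 3x3 arrays).
    B = model_normals.T @ scene_normals              # cross-covariance of normals
    U, _, Vt = np.linalg.svd(B)
    D = np.diag([1.0, 1.0, np.linalg.det(U @ Vt)])   # guard against reflections
    return U @ D @ Vt

def register_from_three_planes(scene_planes, model_planes):
    """Closed-form 6DOF pose from three plane-to-plane correspondences.
    Each plane is (unit normal n, offset d) with n . x = d; the recovered
    transform maps scene points into the model frame: x_m = R x_s + t."""
    ns = np.array([n for n, _ in scene_planes])
    nm = np.array([n for n, _ in model_planes])
    ds = np.array([d for _, d in scene_planes])
    dm = np.array([d for _, d in model_planes])
    R = rotation_from_normals(ns, nm)
    # On a scene plane, n_s . x_s = d_s, so n_m . (R x_s + t) = d_m reduces
    # to n_m . t = d_m - d_s: three linear equations in t for three planes.
    t = np.linalg.solve(nm, dm - ds)
    return R, t

# Toy check: a 90-degree yaw plus a translation is recovered exactly.
Rz = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], float)
t_true = np.array([0.2, -0.1, 0.3])
scene = [(np.eye(3)[i], float(i + 1)) for i in range(3)]
model = [(Rz @ n, d + (Rz @ n) @ t_true) for n, d in scene]
R, t = register_from_three_planes(scene, model)
assert np.allclose(R, Rz) and np.allclose(t, t_true)
```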
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1EfIX5LNd5e",
"doi": "10.1109/TVCG.2022.3183400",
"abstract": "Automatic generation of fonts can greatly facilitate the font design process, and provide prototypes where designers can draw inspiration from. Existing generation methods are mainly built upon rasterized glyph images to utilize the successful convolutional architecture, but ignore the vector nature of glyph shapes. We present an implicit representation, modeling each glyph as shape primitives enclosed by several quadratic curves. This structured implicit representation is shown to be better suited for glyph modeling, and enables rendering glyph images at arbitrary high resolutions. Our representation gives high-quality glyph reconstruction and interpolation results, and performs well on the challenging one-shot font style transfer task comparing to other alternatives both qualitatively and quantitatively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Automatic generation of fonts can greatly facilitate the font design process, and provide prototypes where designers can draw inspiration from. Existing generation methods are mainly built upon rasterized glyph images to utilize the successful convolutional architecture, but ignore the vector nature of glyph shapes. We present an implicit representation, modeling each glyph as shape primitives enclosed by several quadratic curves. This structured implicit representation is shown to be better suited for glyph modeling, and enables rendering glyph images at arbitrary high resolutions. Our representation gives high-quality glyph reconstruction and interpolation results, and performs well on the challenging one-shot font style transfer task comparing to other alternatives both qualitatively and quantitatively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Automatic generation of fonts can greatly facilitate the font design process, and provide prototypes where designers can draw inspiration from. Existing generation methods are mainly built upon rasterized glyph images to utilize the successful convolutional architecture, but ignore the vector nature of glyph shapes. We present an implicit representation, modeling each glyph as shape primitives enclosed by several quadratic curves. This structured implicit representation is shown to be better suited for glyph modeling, and enables rendering glyph images at arbitrary high resolutions. Our representation gives high-quality glyph reconstruction and interpolation results, and performs well on the challenging one-shot font style transfer task comparing to other alternatives both qualitatively and quantitatively.",
"title": "Learning Implicit Glyph Shape Representation",
"normalizedTitle": "Learning Implicit Glyph Shape Representation",
"fno": "09797843",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape",
"Task Analysis",
"Rendering Computer Graphics",
"Image Resolution",
"Three Dimensional Displays",
"Solid Modeling",
"Graphics",
"Font Generation",
"Implicit Representation"
],
"authors": [
{
"givenName": "Ying-Tian",
"surname": "Liu",
"fullName": "Ying-Tian Liu",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuan-Chen",
"surname": "Guo",
"fullName": "Yuan-Chen Guo",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yi-Xiao",
"surname": "Li",
"fullName": "Yi-Xiao Li",
"affiliation": "Academy of Arts & Design, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chen",
"surname": "Wang",
"fullName": "Chen Wang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Song-Hai",
"surname": "Zhang",
"fullName": "Song-Hai Zhang",
"affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200n3067",
"title": "Multiresolution Deep Implicit Functions for 3D Shape Representation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3067/1BmL39Zkm9G",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2633",
"title": "Deep Implicit Surface Point Prediction Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2633/1BmLiNnsKo8",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2819",
"title": "GIFS: Neural Implicit Function for General Shape Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2819/1H0KHzfm1Ta",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8593",
"title": "UNIST: Unpaired Neural Implicit Shape Translation Network",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8593/1H1kv6nHUFq",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042505",
"title": "Facial Geometric Detail Recovery via Implicit Representation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042505/1KOuYhOxDmo",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h153",
"title": "Learning Shape Templates With Structured Implicit Functions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h153/1hVlFj4REmk",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2020/6698/0/669800a954",
"title": "A Word Representation Method Based on Glyph of Chinese Character",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2020/669800a954/1kuHOo4XaPm",
"parentPublication": {
"id": "proceedings/icitbs/2020/6698/0",
"title": "2020 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a452",
"title": "Learning Implicit Surface Light Fields",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a452/1qyxkR2YxGM",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b429",
"title": "Deep Implicit Templates for 3D Shape Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b429/1yeLEHLhLEc",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900i829",
"title": "Holistic 3D Scene Understanding from a Single Image with Implicit Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900i829/1yeLsqtovKg",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09797054",
"articleId": "1EexleVk9kk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09798876",
"articleId": "1Eho8QXQucg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Eho8C5k9sA",
"name": "ttg555501-09797843s1-supp1-3183400.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09797843s1-supp1-3183400.pdf",
"extension": "pdf",
"size": "20.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
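The glyph record above models each glyph as shape primitives enclosed by quadratic curves, which makes rendering at any resolution a matter of evaluating implicit inequalities on a sampling grid. Below is a minimal sketch of that idea, assuming a hypothetical encoding where each primitive is the intersection of quadratic half-planes ax^2 + bxy + cy^2 + dx + ey + f <= 0 and the glyph is their union; it illustrates structured implicit occupancy only, not the paper's learned representation.

```python
import numpy as np

def quad_value(coef, x, y):
    # One quadratic implicit curve: a*x^2 + b*x*y + c*y^2 + d*x + e*y + f,
    # with negative values taken to mean "inside" the curve.
    a, b, c, d, e, f = coef
    return a*x*x + b*x*y + c*y*y + d*x + e*y + f

def glyph_occupancy(primitives, res=256):
    """Rasterize a glyph modeled as a union of primitives, each primitive
    being the intersection of quadratic half-planes, at any resolution."""
    ys, xs = np.meshgrid(np.linspace(0, 1, res), np.linspace(0, 1, res),
                         indexing="ij")
    img = np.zeros((res, res), dtype=bool)
    for curves in primitives:
        inside = np.ones((res, res), dtype=bool)
        for coef in curves:
            inside &= quad_value(coef, xs, ys) <= 0.0
        img |= inside
    return img

# Toy glyph: one disk primitive of radius 0.3 centred at (0.5, 0.5),
# i.e. (x - 0.5)^2 + (y - 0.5)^2 - 0.09 <= 0 expanded into coefficients.
disk = [[(1.0, 0.0, 1.0, -1.0, -1.0, 0.41)]]
print(glyph_occupancy(disk, res=64).sum())  # roughly pi * (0.3 * 64)^2 pixels
```

Because the representation is resolution-free, the same coefficient list rasterizes cleanly at res=64 or res=4096; only the sampling grid changes.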
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1E5LEFpMA48",
"doi": "10.1109/TVCG.2022.3180899",
"abstract": "We propose Composite Parallel Coordinates, a novel parallel coordinates technique to effectively represent the interplay of component alternatives in a system. It builds upon a dedicated data model that formally describes the interaction of components. Parallel coordinates can help decision-makers identify the most preferred solution among a number of alternatives. Multi-component systems require one such multi-attribute choice for each component. Each of these choices might have side effects on the system's operability and performance, making them co-dependent. Common approaches employ complex multi-component models or involve back-and-forth iterations between single components until an acceptable compromise is reached. A simultaneous visual exploration across independently modeled but connected components is needed to make system design more efficient. Using dedicated layout and interaction strategies, our Composite Parallel Coordinates allow analysts to explore both individual properties of components as well as their interoperability and joint performance. We showcase the effectiveness of Composite Parallel Coordinates for co-dependent multi-attribute choices by means of three real-world scenarios from distinct application areas. In addition to the case studies, we reflect on observing two domain experts collaboratively working with the proposed technique and communicating along the way.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose Composite Parallel Coordinates, a novel parallel coordinates technique to effectively represent the interplay of component alternatives in a system. It builds upon a dedicated data model that formally describes the interaction of components. Parallel coordinates can help decision-makers identify the most preferred solution among a number of alternatives. Multi-component systems require one such multi-attribute choice for each component. Each of these choices might have side effects on the system's operability and performance, making them co-dependent. Common approaches employ complex multi-component models or involve back-and-forth iterations between single components until an acceptable compromise is reached. A simultaneous visual exploration across independently modeled but connected components is needed to make system design more efficient. Using dedicated layout and interaction strategies, our Composite Parallel Coordinates allow analysts to explore both individual properties of components as well as their interoperability and joint performance. We showcase the effectiveness of Composite Parallel Coordinates for co-dependent multi-attribute choices by means of three real-world scenarios from distinct application areas. In addition to the case studies, we reflect on observing two domain experts collaboratively working with the proposed technique and communicating along the way.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose Composite Parallel Coordinates, a novel parallel coordinates technique to effectively represent the interplay of component alternatives in a system. It builds upon a dedicated data model that formally describes the interaction of components. Parallel coordinates can help decision-makers identify the most preferred solution among a number of alternatives. Multi-component systems require one such multi-attribute choice for each component. Each of these choices might have side effects on the system's operability and performance, making them co-dependent. Common approaches employ complex multi-component models or involve back-and-forth iterations between single components until an acceptable compromise is reached. A simultaneous visual exploration across independently modeled but connected components is needed to make system design more efficient. Using dedicated layout and interaction strategies, our Composite Parallel Coordinates allow analysts to explore both individual properties of components as well as their interoperability and joint performance. We showcase the effectiveness of Composite Parallel Coordinates for co-dependent multi-attribute choices by means of three real-world scenarios from distinct application areas. In addition to the case studies, we reflect on observing two domain experts collaboratively working with the proposed technique and communicating along the way.",
"title": "COMPO*SED: Composite Parallel Coordinates for Co-Dependent Multi-Attribute Choices",
"normalizedTitle": "COMPO*SED: Composite Parallel Coordinates for Co-Dependent Multi-Attribute Choices",
"fno": "09792437",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"System Performance",
"Interoperability",
"Data Models",
"Task Analysis",
"Lenses",
"Cameras",
"Iterative Methods",
"Multi Criteria Decision Making",
"Parallel Coordinates",
"Systems Engineering Design",
"Visual Exploration"
],
"authors": [
{
"givenName": "Lena",
"surname": "Cibulski",
"fullName": "Lena Cibulski",
"affiliation": "Fraunhofer IGD, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "May",
"fullName": "Thorsten May",
"affiliation": "Fraunhofer IGD, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johanna",
"surname": "Schmidt",
"fullName": "Johanna Schmidt",
"affiliation": "VRVis Research Center, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jörn",
"surname": "Kohlhammer",
"fullName": "Jörn Kohlhammer",
"affiliation": "Fraunhofer IGD, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2014/4103/0/4103a007",
"title": "Spectral-Based Contractible Parallel Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a007/12OmNCgrDcV",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euromicro/1998/8646/2/864620997",
"title": "Composite Objects: Real-Time Programming with CORBA",
"doi": null,
"abstractUrl": "/proceedings-article/euromicro/1998/864620997/12OmNwdbVe3",
"parentPublication": {
"id": "proceedings/euromicro/1998/8646/2",
"title": "EUROMICRO Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2012/4736/0/4736a432",
"title": "Deriving Specifications for Composite Web Services",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2012/4736a432/12OmNwnYFYj",
"parentPublication": {
"id": "proceedings/compsac/2012/4736/0",
"title": "2012 IEEE 36th Annual Computer Software and Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2006/2606/0/26060347",
"title": "Construction of 3D Composite Objects from Range Data",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2006/26060347/12OmNy3iFmH",
"parentPublication": {
"id": "proceedings/cgiv/2006/2606/0",
"title": "International Conference on Computer Graphics, Imaging and Visualisation (CGIV'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2005/9464/0/01532141",
"title": "An interactive 3D integration of parallel coordinates and star glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2005/01532141/12OmNyuPLlh",
"parentPublication": {
"id": "proceedings/infvis/2005/9464/0",
"title": "IEEE Symposium on Information Visualization (InfoVis 05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2000/6293/2/00859055",
"title": "An efficient algorithm to extract components of a composite signal",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2000/00859055/12OmNzA6GS0",
"parentPublication": {
"id": "proceedings/icassp/2000/6293/2",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2005/2790/0/01532141",
"title": "An interactive 3D integration of parallel coordinates and star glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2005/01532141/12OmNzZEAtN",
"parentPublication": {
"id": "proceedings/ieee-infovis/2005/2790/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnp/1997/8061/0/80610105",
"title": "A Compositional Approach for Designing Multifunction Time-Dependent Protocols",
"doi": null,
"abstractUrl": "/proceedings-article/icnp/1997/80610105/12OmNzdoMX9",
"parentPublication": {
"id": "proceedings/icnp/1997/8061/0",
"title": "Proceedings 1997 International Conference on Network Protocols",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2013/08/ttk2013081863",
"title": "Principal Composite Kernel Feature Analysis: Data-Dependent Kernel Approach",
"doi": null,
"abstractUrl": "/journal/tk/2013/08/ttk2013081863/13rRUwbaqVh",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933706",
"title": "Slope-Dependent Rendering of Parallel Coordinates to Reduce Density Distortion and Ghost Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933706/1fTgJ8ktWak",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09793626",
"articleId": "1E5LEepCqTC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09797054",
"articleId": "1EexleVk9kk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1E5LEB1hFcc",
"name": "ttg555501-09792437s1-supp1-3180899.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09792437s1-supp1-3180899.mp4",
"extension": "mp4",
"size": "19.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
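The COMPO*SED record above rests on a data model in which each system component offers multi-attribute alternatives whose choices are co-dependent. The sketch below shows one plausible shape for such a model in Python: components holding alternative records, plus a cross-component predicate, with feasible joint configurations enumerated by brute force. The component names, attributes, and the shared weight budget are invented for illustration; the paper's actual data model and its parallel-coordinates layout are not reproduced here.

```python
from itertools import product

# Invented data model: each component offers multi-attribute alternatives;
# a predicate over one-choice-per-component encodes their co-dependency.
components = {
    "camera": [{"name": "camA", "weight": 90, "interface": "usb3"},
               {"name": "camB", "weight": 40, "interface": "mipi"}],
    "lens":   [{"name": "lensA", "weight": 120, "mount": "c"},
               {"name": "lensB", "weight": 60,  "mount": "cs"}],
}

def compatible(choice):
    # Example side effect: both components draw on a shared weight budget.
    return sum(alt["weight"] for alt in choice.values()) <= 160

def joint_alternatives(components, predicate):
    """Enumerate system configurations (one alternative per component)
    that satisfy the cross-component constraint."""
    names = list(components)
    for combo in product(*components.values()):
        choice = dict(zip(names, combo))
        if predicate(choice):
            yield choice

for cfg in joint_alternatives(components, compatible):
    print({part: alt["name"] for part, alt in cfg.items()})
# Only combinations within the budget print; these feasible joints are what
# a composite parallel-coordinates view would connect across component axes.
```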
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1E5LEepCqTC",
"doi": "10.1109/TVCG.2022.3181262",
"abstract": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user’s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user’s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user’s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.",
"title": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator",
"normalizedTitle": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator",
"fno": "09793626",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Optical Flow",
"Modulation",
"Angular Velocity",
"Image Color Analysis",
"Teleportation",
"Legged Locomotion",
"VR Sickness",
"Contrast Manipulation",
"Vision Modulation",
"Shading And Rendering"
],
"authors": [
{
"givenName": "Guanghan",
"surname": "Zhao",
"fullName": "Guanghan Zhao",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason",
"surname": "Orlosky",
"fullName": "Jason Orlosky",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Feiner",
"fullName": "Steven Feiner",
"affiliation": "Columbia University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Photchara",
"surname": "Ratsamee",
"fullName": "Photchara Ratsamee",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuki",
"surname": "Uranishi",
"fullName": "Yuki Uranishi",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-13",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892348",
"title": "Steering locomotion by vestibular perturbation in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10075482",
"title": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10075482/1LAuCOR3RE4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08798880",
"title": "Sick Moves! Motion Parameters as Indicators of Simulator Sickness",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09786815",
"articleId": "1DSumaVNxG8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09792437",
"articleId": "1E5LEFpMA48",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Eb19m8rRTO",
"name": "ttg555501-09793626s1-supp1-3181262.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09793626s1-supp1-3181262.mp4",
"extension": "mp4",
"size": "67.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
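The vision-modulator record above describes a shader whose restriction strength is governed by per-frame deltas in velocity and angle and by the view's average color. Here is a rough CPU-side sketch of that control logic, with all gains (k_v, k_a), the smoothing factor, and the radial falloff chosen arbitrarily for illustration; the actual effect runs as a GPU shader and also uses depth, which is omitted here.

```python
import numpy as np

def restriction_strength(dv, dtheta, prev, k_v=0.5, k_a=0.8, smooth=0.1):
    # Hypothetical gains: restriction grows with per-frame changes in linear
    # velocity (dv) and view angle (dtheta), eased in and out over time.
    target = min(1.0, k_v * abs(dv) + k_a * abs(dtheta))
    return prev + smooth * (target - prev)

def modulate_periphery(frame, strength, inner=0.3, outer=0.7):
    """Blend peripheral pixels toward the frame's average color based on
    normalized radial distance; a CPU stand-in for the peripheral-contrast
    shader, without the depth term."""
    h, w, _ = frame.shape
    yy, xx = np.mgrid[0:h, 0:w]
    r = np.hypot((xx - w / 2) / (w / 2), (yy - h / 2) / (h / 2))
    fade = np.clip((r - inner) / (outer - inner), 0.0, 1.0) * strength
    mean = frame.reshape(-1, 3).mean(axis=0)
    return frame * (1 - fade[..., None]) + mean * fade[..., None]

# Per frame: s = restriction_strength(dv, dtheta, s); out = modulate_periphery(img, s)
```

Keying the strength to motion deltas rather than a constant restrictor is what distinguishes this scheme from static or pitch-black field-of-view restrictors.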
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DSumaVNxG8",
"doi": "10.1109/TVCG.2022.3179766",
"abstract": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As one of the facial expression recognition techniques for Head-Mounted Display (HMD) users, embedded photo-reflective sensors have been used. In this paper, we investigate how gaze and face directions affect facial expression recognition using the embedded photo-reflective sensors. First, we collected a dataset of five facial expressions (Neutral, Happy, Angry, Sad, Surprised) while looking in diverse directions by moving 1) the eyes and 2) the head. Using the dataset, we analyzed the effect of gaze and face directions by constructing facial expression classifiers in five ways and evaluating the classification accuracy of each classifier. The results revealed that the single classifier that learned the data for all gaze points achieved the highest classification performance. Then, we investigated which facial part was affected by the gaze and face direction. The results showed that the gaze directions affected the upper facial parts, while the face directions affected the lower facial parts. In addition, by removing the bias of facial expression reproducibility, we investigated the pure effect of gaze and face directions in three conditions. The results showed that, in terms of gaze direction, building classifiers for each direction significantly improved the classification accuracy. However, in terms of face directions, there were slight differences between the classifier conditions. Our experimental results implied that multiple classifiers corresponding to multiple gaze and face directions improved facial expression recognition accuracy, but collecting the data of the vertical movement of gaze and face is a practical solution to improving facial expression recognition accuracy.",
"title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"normalizedTitle": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"fno": "09786815",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Sensors",
"Face Recognition",
"Resists",
"Cameras",
"Avatars",
"Gravity",
"Optical Sensors",
"Facial Expression Recognition",
"Head Mounted Display",
"Embedded Photo Reflective Sensor",
"Gaze Direction",
"Face Direction"
],
"authors": [
{
"givenName": "Fumihiko",
"surname": "Nakamura",
"fullName": "Fumihiko Nakamura",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masaaki",
"surname": "Murakami",
"fullName": "Masaaki Murakami",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katsuhiro",
"surname": "Suzuki",
"fullName": "Katsuhiro Suzuki",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masaaki",
"surname": "Fukuoka",
"fullName": "Masaaki Fukuoka",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Katsutoshi",
"surname": "Masai",
"fullName": "Katsutoshi Masai",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maki",
"surname": "Sugimoto",
"fullName": "Maki Sugimoto",
"affiliation": "Faculty of Science and Technology, Keio University, Kanagawa, Japan. K. Masai is with NTT Communication Science Laboratories and the Faculty of Science and Technology, Keio University, Kanagawa, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-06-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2008/2153/0/04813466",
"title": "A fast and robust 3D head pose and gaze estimation system",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813466/12OmNBqv2dy",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466462",
"title": "Image-based Attention Level Estimation of Interaction Scene by Head Pose and Gaze Information",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466462/13Jkr9SfNnG",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/02/08319988",
"title": "Emotion Recognition in Simulated Social Interactions",
"doi": null,
"abstractUrl": "/journal/ta/2020/02/08319988/13rRUB7a1ea",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c182",
"title": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c182/1H1mDm1L85i",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d453",
"title": "Fine Gaze Redirection Learning with Gaze Hardness-aware Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d453/1L8qk4xmpvW",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797925",
"title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a101",
"title": "Digital Full-Face Mask Display with Expression Recognition using Embedded Photo Reflective Sensor Arrays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a101/1pystZgPICk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09785918",
"articleId": "1DPaEdHg6KQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09793626",
"articleId": "1E5LEepCqTC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
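The study above compares a single classifier trained over all gaze points against classifiers built per gaze direction. Below is a toy harness for that comparison, assuming scikit-learn and random stand-in sensor data; the 16 photo-reflective channels, 5 expressions, and 9 gaze points are invented dimensions, so this shows the evaluation pattern only, not the paper's data or results.

```python
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

def pooled_vs_per_direction(X, y, gaze_ids):
    """Compare one classifier trained across all gaze directions against
    separate classifiers trained per direction (mean cross-validated accuracy)."""
    pooled = cross_val_score(SVC(), X, y, cv=5).mean()
    per_dir = [cross_val_score(SVC(), X[gaze_ids == g], y[gaze_ids == g], cv=5).mean()
               for g in np.unique(gaze_ids)]
    return pooled, float(np.mean(per_dir))

# Random stand-in data: 16 sensor channels, 5 expressions, 9 gaze points.
rng = np.random.default_rng(0)
X = rng.normal(size=(900, 16))
y = rng.integers(0, 5, size=900)
g = rng.integers(0, 9, size=900)
print(pooled_vs_per_direction(X, y, g))  # both near chance on random data
```

On real sensor data the two numbers would separate, which is exactly the comparison the abstract reports across its classifier conditions.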
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DPaE3QYx68",
"doi": "10.1109/TVCG.2022.3178734",
"abstract": "The research topic of sketch-to-portrait generation has witnessed a boost of progress with deep learning techniques. The recently proposed StyleGAN architectures achieve state-of-the-art generation ability but the original StyleGAN is not friendly for sketch-based creation due to its unconditional generation nature. To address this issue, we propose a direct conditioning strategy to better preserve the spatial information under the StyleGAN framework. Specifically, we introduce Spatially Conditioned StyleGAN (SC-StyleGAN for short), which explicitly injects spatial constraints to the original StyleGAN generation process. We explore two input modalities, sketches and semantic maps, which together allow users to express desired generation results more precisely and easily. Based on SC-StyleGAN, we present DrawingInStyles, a novel drawing interface for non-professional users to easily produce high-quality, photo-realistic face images with precise control, either from scratch or editing existing ones. Qualitative and quantitative evaluations show the superior generation ability of our method to existing and alternative solutions. The usability and expressiveness of our system are confirmed by a user study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The research topic of sketch-to-portrait generation has witnessed a boost of progress with deep learning techniques. The recently proposed StyleGAN architectures achieve state-of-the-art generation ability but the original StyleGAN is not friendly for sketch-based creation due to its unconditional generation nature. To address this issue, we propose a direct conditioning strategy to better preserve the spatial information under the StyleGAN framework. Specifically, we introduce Spatially Conditioned StyleGAN (SC-StyleGAN for short), which explicitly injects spatial constraints to the original StyleGAN generation process. We explore two input modalities, sketches and semantic maps, which together allow users to express desired generation results more precisely and easily. Based on SC-StyleGAN, we present DrawingInStyles, a novel drawing interface for non-professional users to easily produce high-quality, photo-realistic face images with precise control, either from scratch or editing existing ones. Qualitative and quantitative evaluations show the superior generation ability of our method to existing and alternative solutions. The usability and expressiveness of our system are confirmed by a user study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The research topic of sketch-to-portrait generation has witnessed a boost of progress with deep learning techniques. The recently proposed StyleGAN architectures achieve state-of-the-art generation ability but the original StyleGAN is not friendly for sketch-based creation due to its unconditional generation nature. To address this issue, we propose a direct conditioning strategy to better preserve the spatial information under the StyleGAN framework. Specifically, we introduce Spatially Conditioned StyleGAN (SC-StyleGAN for short), which explicitly injects spatial constraints to the original StyleGAN generation process. We explore two input modalities, sketches and semantic maps, which together allow users to express desired generation results more precisely and easily. Based on SC-StyleGAN, we present DrawingInStyles, a novel drawing interface for non-professional users to easily produce high-quality, photo-realistic face images with precise control, either from scratch or editing existing ones. Qualitative and quantitative evaluations show the superior generation ability of our method to existing and alternative solutions. The usability and expressiveness of our system are confirmed by a user study.",
"title": "DrawingInStyles: Portrait Image Generation and Editing with Spatially Conditioned StyleGAN",
"normalizedTitle": "DrawingInStyles: Portrait Image Generation and Editing with Spatially Conditioned StyleGAN",
"fno": "09784910",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Faces",
"Semantics",
"Image Synthesis",
"Codes",
"Training",
"Transforms",
"Spatial Resolution",
"Sketch Based Portrait Generation",
"Suggestive Interfaces",
"Data Driven Approaches",
"Style GAN",
"Conditional Generation"
],
"authors": [
{
"givenName": "Wanchao",
"surname": "Su",
"fullName": "Wanchao Su",
"affiliation": "Department of Computer Science, Hui Ye and Hongbo Fu are with the School of Creative Media, City University of Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Ye",
"fullName": "Hui Ye",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences. Lin Gao is also with University of Chinese Academy of Sciences",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shu-Yu",
"surname": "Chen",
"fullName": "Shu-Yu Chen",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences. Lin Gao is also with University of Chinese Academy of Sciences",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lin",
"surname": "Gao",
"fullName": "Lin Gao",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences. Lin Gao is also with University of Chinese Academy of Sciences",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongbo",
"surname": "Fu",
"fullName": "Hongbo Fu",
"affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences. Lin Gao is also with University of Chinese Academy of Sciences",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccd/2012/3051/0/315alaghi",
"title": "A spectral transform approach to stochastic circuits",
"doi": null,
"abstractUrl": "/proceedings-article/iccd/2012/315alaghi/12OmNx4yvqG",
"parentPublication": {
"id": "proceedings/iccd/2012/3051/0",
"title": "2012 IEEE 30th International Conference on Computer Design (ICCD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/02/08640099",
"title": "Automatic Color Sketch Generation Using Deep Style Transfer",
"doi": null,
"abstractUrl": "/magazine/cg/2019/02/08640099/17D45VsBTXm",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500a955",
"title": "AE-StyleGAN: Improved Training of Style-Based Auto-Encoders",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500a955/1B12OZCI3JK",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g742",
"title": "Multi-Class Multi-Instance Count Conditioned Adversarial Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g742/1BmIpDwamYg",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2022/0745/0/074500a145",
"title": "Embedding Chinese Face Painting Into the StyleGAN Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2022/074500a145/1EViqpCPS00",
"parentPublication": {
"id": "proceedings/cbd/2022/0745/0",
"title": "2021 Ninth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h683",
"title": "Pastiche Master: Exemplar-Based High-Resolution Portrait Style Transfer",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h683/1H0NNPChQsM",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600h662",
"title": "FENeRF: Face Editing in Neural Radiance Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600h662/1H1mBYZtG6Y",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e308",
"title": "SketchInverter: Multi-Class Sketch-Based Image Generation via GAN Inversion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e308/1L8qBcO17xe",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c256",
"title": "TediGAN: Text-Guided Diverse Face Image Generation and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c256/1yeKSL9mS2I",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a520",
"title": "Improved Semantic-aware StyleGAN-based Real Face Editing Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a520/1ziPdCmrGIE",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09783067",
"articleId": "1DIwTDMm7Mk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09785918",
"articleId": "1DPaEdHg6KQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DPaCfUuvGE",
"name": "ttg555501-09784910s1-supp2-3178734.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09784910s1-supp2-3178734.pdf",
"extension": "pdf",
"size": "29 MB",
"__typename": "WebExtraType"
},
{
"id": "1DPaDNC9kJi",
"name": "ttg555501-09784910s1-supp1-3178734.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09784910s1-supp1-3178734.mp4",
"extension": "mp4",
"size": "136 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
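The SC-StyleGAN entry above hinges on one idea: injecting a spatial feature map derived from a sketch or semantic map into the otherwise unconditional StyleGAN generation process. Below is a minimal sketch of that kind of conditioning; it assumes PyTorch, and every name in it (`SpatialEncoder`, `inject_spatial`, the layer widths) is a hypothetical illustration, not the paper's actual architecture.

```python
# Hypothetical sketch of spatial conditioning in a StyleGAN-like generator.
import torch
import torch.nn as nn

class SpatialEncoder(nn.Module):
    """Encodes a sketch or semantic map into a spatial feature tensor."""
    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, 64, 3, stride=2, padding=1), nn.LeakyReLU(0.2),
            nn.Conv2d(64, out_ch, 3, stride=2, padding=1), nn.LeakyReLU(0.2),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)

def inject_spatial(gen_feat: torch.Tensor, cond_feat: torch.Tensor,
                   proj: nn.Conv2d) -> torch.Tensor:
    """Fuse condition features into an intermediate generator feature map:
    channel-wise concatenation, then a 1x1 projection back to the original
    width so the rest of the generator is unchanged."""
    return proj(torch.cat([gen_feat, cond_feat], dim=1))

# Toy usage: a 64x64 generator feature map conditioned on a 256x256 sketch.
enc = SpatialEncoder(in_ch=1, out_ch=128)
proj = nn.Conv2d(512 + 128, 512, kernel_size=1)
sketch = torch.randn(1, 1, 256, 256)        # user-drawn sketch (1 channel)
gen_feat = torch.randn(1, 512, 64, 64)      # intermediate StyleGAN activations
out = inject_spatial(gen_feat, enc(sketch), proj)
print(out.shape)                            # torch.Size([1, 512, 64, 64])
```

Concatenate-then-project is just one plausible fusion choice; the paper's own injection mechanism may differ.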
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DPaEdHg6KQ",
"doi": "10.1109/TVCG.2022.3179269",
"abstract": "Real walking techniques can provide the user with a more natural, highly immersive walking experience compared to the experience of other locomotion techniques. In contrast to the direct mapping between the virtual space and an equal-sized physical space that can be simply realized, the nonequivalent mapping that enables the user to explore a large virtual space by real walking within a confined physical space is complex. To address this issue, the redirected walking (RDW) technique is proposed by many works to adjust the user's virtual and physical movements based on some redirection manipulations. In this manner, subtle or overt motion deviations can be injected between the user's virtual and physical movements, allowing the user to undertake real walking in large virtual spaces by using different redirection controller methods. In this paper, we present a brief review to describe major concepts and methodologies in the field of redirected walking. First, we provide the fundamentals and basic criteria of RDW, and then we describe the redirection manipulations that can be applied to adjust the user's movements during virtual exploration. Furthermore, we clarify the redirection controller methods that properly adopt strategies for combining different redirection manipulations and present a classification of these methods by several categories. Finally, we summarize several experimental metrics to evaluate the performance of redirection controller methods and discuss current challenges and future work. Our study systematically classifies the relevant theories, concepts, and methods of RDW, and provides assistance to the newcomers in understanding and implementing the RDW technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real walking techniques can provide the user with a more natural, highly immersive walking experience compared to the experience of other locomotion techniques. In contrast to the direct mapping between the virtual space and an equal-sized physical space that can be simply realized, the nonequivalent mapping that enables the user to explore a large virtual space by real walking within a confined physical space is complex. To address this issue, the redirected walking (RDW) technique is proposed by many works to adjust the user's virtual and physical movements based on some redirection manipulations. In this manner, subtle or overt motion deviations can be injected between the user's virtual and physical movements, allowing the user to undertake real walking in large virtual spaces by using different redirection controller methods. In this paper, we present a brief review to describe major concepts and methodologies in the field of redirected walking. First, we provide the fundamentals and basic criteria of RDW, and then we describe the redirection manipulations that can be applied to adjust the user's movements during virtual exploration. Furthermore, we clarify the redirection controller methods that properly adopt strategies for combining different redirection manipulations and present a classification of these methods by several categories. Finally, we summarize several experimental metrics to evaluate the performance of redirection controller methods and discuss current challenges and future work. Our study systematically classifies the relevant theories, concepts, and methods of RDW, and provides assistance to the newcomers in understanding and implementing the RDW technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real walking techniques can provide the user with a more natural, highly immersive walking experience compared to the experience of other locomotion techniques. In contrast to the direct mapping between the virtual space and an equal-sized physical space that can be simply realized, the nonequivalent mapping that enables the user to explore a large virtual space by real walking within a confined physical space is complex. To address this issue, the redirected walking (RDW) technique is proposed by many works to adjust the user's virtual and physical movements based on some redirection manipulations. In this manner, subtle or overt motion deviations can be injected between the user's virtual and physical movements, allowing the user to undertake real walking in large virtual spaces by using different redirection controller methods. In this paper, we present a brief review to describe major concepts and methodologies in the field of redirected walking. First, we provide the fundamentals and basic criteria of RDW, and then we describe the redirection manipulations that can be applied to adjust the user's movements during virtual exploration. Furthermore, we clarify the redirection controller methods that properly adopt strategies for combining different redirection manipulations and present a classification of these methods by several categories. Finally, we summarize several experimental metrics to evaluate the performance of redirection controller methods and discuss current challenges and future work. Our study systematically classifies the relevant theories, concepts, and methods of RDW, and provides assistance to the newcomers in understanding and implementing the RDW technique.",
"title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"normalizedTitle": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances",
"fno": "09785918",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Aerospace Electronics",
"Space Exploration",
"Visualization",
"Visual Perception",
"Tracking",
"Layout",
"Virtual Reality",
"Real Walking",
"Redirected Walking",
"Redirection",
"Locomotion"
],
"authors": [
{
"givenName": "Linwei",
"surname": "Fan",
"fullName": "Linwei Fan",
"affiliation": "School of Computer Science and Technology, Shandong University of Finance and Economics, Jinan, China, 250014",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huiyu",
"surname": "Li",
"fullName": "Huiyu Li",
"affiliation": "School of Management Science and Engineering, Shandong University of Finance and Economics, Jinan, China, 250014",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Miaowen",
"surname": "Shi",
"fullName": "Miaowen Shi",
"affiliation": "School of software, Shandong University, Jinan, China, 250101",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09893374",
"title": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09893374/1GGLIh8KmSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798118",
"title": "PReWAP: Predictive Redirected Walking Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798118/1cJ0XGXV02s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a021",
"title": "OpenRDW: A Redirected Walking Library and Benchmark with Multi-User, Learning-based Functionalities and State-of-the-art Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a021/1yeD0KmODfO",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09784910",
"articleId": "1DPaE3QYx68",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09786815",
"articleId": "1DSumaVNxG8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
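The RDW survey above repeatedly refers to redirection manipulations such as rotation and curvature gains. The sketch below uses the common textbook definitions of those two gains (not any particular controller from the survey); the constants are illustrative values near commonly cited detection thresholds, not prescriptions.

```python
# Hedged sketch of two classic RDW manipulations: rotation and curvature gain.
import math

def apply_rotation_gain(physical_yaw_delta: float, g_r: float) -> float:
    """Virtual yaw change = physical yaw change scaled by the rotation gain."""
    return g_r * physical_yaw_delta

def apply_curvature_gain(step_length: float, radius: float) -> float:
    """Keeping the virtual path straight while bending the physical path onto
    a circle of the given radius injects this much yaw per step (radians)."""
    return step_length / radius

# One simulated step: the user walks 0.7 m and turns 10 degrees physically.
virtual_turn = apply_rotation_gain(math.radians(10.0), g_r=1.24)
injected_yaw = apply_curvature_gain(0.7, radius=7.5)  # 7.5 m: an often cited
                                                      # detection threshold
print(math.degrees(virtual_turn), math.degrees(injected_yaw))
```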
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DIwTDMm7Mk",
"doi": "10.1109/TVCG.2022.3178237",
"abstract": "In real-time dynamic reconstruction, geometry and motion are the major focuses while appearance is not fully explored, leading to the low-quality appearance of the reconstructed surfaces. In this paper, we propose a lightweight lighting model that considers spatially varying lighting conditions caused by self-occlusion. This model estimates per-vertex masks on top of a single Spherical Harmonic (SH) lighting to represent spatially varying lighting conditions without adding too much computation cost. The mask is estimated based on the local geometry of a vertex to model the self-occlusion effect, which is the major reason leading to the spatial variation of lighting. Furthermore, to use this model in dynamic reconstruction, we also improve the motion estimation quality by adding a real-time per-vertex displacement estimation step. Experiments demonstrate that both the reconstructed appearance and the motion are largely improved compared with the current state-of-the-art techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In real-time dynamic reconstruction, geometry and motion are the major focuses while appearance is not fully explored, leading to the low-quality appearance of the reconstructed surfaces. In this paper, we propose a lightweight lighting model that considers spatially varying lighting conditions caused by self-occlusion. This model estimates per-vertex masks on top of a single Spherical Harmonic (SH) lighting to represent spatially varying lighting conditions without adding too much computation cost. The mask is estimated based on the local geometry of a vertex to model the self-occlusion effect, which is the major reason leading to the spatial variation of lighting. Furthermore, to use this model in dynamic reconstruction, we also improve the motion estimation quality by adding a real-time per-vertex displacement estimation step. Experiments demonstrate that both the reconstructed appearance and the motion are largely improved compared with the current state-of-the-art techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In real-time dynamic reconstruction, geometry and motion are the major focuses while appearance is not fully explored, leading to the low-quality appearance of the reconstructed surfaces. In this paper, we propose a lightweight lighting model that considers spatially varying lighting conditions caused by self-occlusion. This model estimates per-vertex masks on top of a single Spherical Harmonic (SH) lighting to represent spatially varying lighting conditions without adding too much computation cost. The mask is estimated based on the local geometry of a vertex to model the self-occlusion effect, which is the major reason leading to the spatial variation of lighting. Furthermore, to use this model in dynamic reconstruction, we also improve the motion estimation quality by adding a real-time per-vertex displacement estimation step. Experiments demonstrate that both the reconstructed appearance and the motion are largely improved compared with the current state-of-the-art techniques.",
"title": "A Self-occlusion Aware Lighting Model for Real-time Dynamic Reconstruction",
"normalizedTitle": "A Self-occlusion Aware Lighting Model for Real-time Dynamic Reconstruction",
"fno": "09783067",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Surface Reconstruction",
"Real Time Systems",
"Geometry",
"Image Reconstruction",
"Computational Modeling",
"Shape",
"Albedo Reconstruction",
"3 D Dynamic Reconstruction",
"Spatially Varying Lighting",
"Real Time Reconstruction"
],
"authors": [
{
"givenName": "Chengwei",
"surname": "Zheng",
"fullName": "Chengwei Zheng",
"affiliation": "BNRist and school of software, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wenbin",
"surname": "Lin",
"fullName": "Wenbin Lin",
"affiliation": "BNRist and school of software, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Feng",
"surname": "Xu",
"fullName": "Feng Xu",
"affiliation": "BNRist and school of software, Tsinghua University, Beijing, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a846",
"title": "Photogeometric Scene Flow for High-Detail Dynamic 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a846/12OmNAtst5T",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d133",
"title": "Intrinsic3D: High-Quality 3D Reconstruction by Joint Appearance and Geometry Optimization with Spatially-Varying Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d133/12OmNC4eSyL",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2016/0979/0/0979a492",
"title": "Three Dimensional Reconstruction from Single Uniform Texture Image with Unknown Lighting Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2016/0979a492/12OmNCd2rVL",
"parentPublication": {
"id": "proceedings/nbis/2016/0979/0",
"title": "2016 19th International Conference on Network-Based Information Systems (NBiS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019481",
"title": "Near-surface lighting estimation and reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019481/12OmNzWx052",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/08/07321825",
"title": "Simultaneous Localization and Appearance Estimation with a Consumer RGB-D Camera",
"doi": null,
"abstractUrl": "/journal/tg/2016/08/07321825/13rRUyv53Fv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09678000",
"title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800a091",
"title": "A Lighting-Invariant Point Processor for Shading",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800a091/1m3nLzS0slW",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c472",
"title": "Inverse Rendering for Complex Indoor Scenes: Shape, Spatially-Varying Lighting and SVBRDF From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c472/1m3o03C864M",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b147",
"title": "Precomputed Radiance Transfer for Reflectance and Lighting Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b147/1qyxlpSwLhC",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900k0586",
"title": "Lighting, Reflectance and Geometry Estimation from 360° Panoramic Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900k0586/1yeIplXJ9wQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09781257",
"articleId": "1DDydDu6nPW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09784910",
"articleId": "1DPaE3QYx68",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DQMaRxjwK4",
"name": "ttg555501-09783067s1-supp1-3178237.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09783067s1-supp1-3178237.mp4",
"extension": "mp4",
"size": "56.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
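The lighting-model entry above augments a single second-order spherical harmonic (SH) lighting with per-vertex masks that account for self-occlusion. A minimal NumPy sketch of masked SH shading follows; the random mask here merely stands in for the paper's geometry-derived occlusion term, and the basis constants are the standard real SH constants up to band 2.

```python
# Minimal sketch of per-vertex masked second-order SH shading.
import numpy as np

def sh_basis(n: np.ndarray) -> np.ndarray:
    """9-dim real SH basis at unit normals n, shape (V, 3) -> (V, 9)."""
    x, y, z = n[:, 0], n[:, 1], n[:, 2]
    return np.stack([
        0.282095 * np.ones_like(x),
        0.488603 * y, 0.488603 * z, 0.488603 * x,
        1.092548 * x * y, 1.092548 * y * z,
        0.315392 * (3.0 * z * z - 1.0),
        1.092548 * x * z, 0.546274 * (x * x - y * y),
    ], axis=1)

def shade(albedo, normals, sh_coeffs, occlusion_mask):
    """Per-vertex radiance = albedo * mask * (SH irradiance at the normal)."""
    irradiance = sh_basis(normals) @ sh_coeffs      # (V,)
    return albedo * occlusion_mask * irradiance

V = 4
normals = np.random.randn(V, 3)
normals /= np.linalg.norm(normals, axis=1, keepdims=True)
coeffs = np.random.rand(9)                          # one global SH lighting
mask = np.random.rand(V)                            # stand-in occlusion term
print(shade(np.full(V, 0.8), normals, coeffs, mask))
```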
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DDydDu6nPW",
"doi": "10.1109/TVCG.2022.3175364",
"abstract": "Visualization has the capacity of converting auditory perceptions of music into visual perceptions, which consequently opens the door to music visualization (e.g., exploring group style transitions and analyzing performance details). Current research either focuses on low-level analysis without constructing and comparing music group characteristics, or concentrates on high-level group analysis without analyzing and exploring detailed information. To fill this gap, integrating the high-level group analysis and low-level details exploration of music, we design a musical semantic sequence visualization analytics prototype system (MUSE) that mainly combines a distribution view and a semantic detail view, assisting analysts in obtaining the group characteristics and detailed interpretation. In the MUSE, we decompose the music into note sequences for modeling and abstracting music into three progressively fine-grained pieces of information (i.e., genres, instruments and notes). The distribution view integrates a new density contour, which considers sequence distance and semantic similarity, and helps analysts quickly identify the distribution features of the music group. The semantic detail view displays the music note sequences and combines the window moving to avoid visual clutter while ensuring the presentation of complete semantic details. To prove the usefulness and effectiveness of MUSE, we perform two case studies based on real-world music MIDI data. In addition, we conduct a quantitative user study and an expert evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualization has the capacity of converting auditory perceptions of music into visual perceptions, which consequently opens the door to music visualization (e.g., exploring group style transitions and analyzing performance details). Current research either focuses on low-level analysis without constructing and comparing music group characteristics, or concentrates on high-level group analysis without analyzing and exploring detailed information. To fill this gap, integrating the high-level group analysis and low-level details exploration of music, we design a musical semantic sequence visualization analytics prototype system (MUSE) that mainly combines a distribution view and a semantic detail view, assisting analysts in obtaining the group characteristics and detailed interpretation. In the MUSE, we decompose the music into note sequences for modeling and abstracting music into three progressively fine-grained pieces of information (i.e., genres, instruments and notes). The distribution view integrates a new density contour, which considers sequence distance and semantic similarity, and helps analysts quickly identify the distribution features of the music group. The semantic detail view displays the music note sequences and combines the window moving to avoid visual clutter while ensuring the presentation of complete semantic details. To prove the usefulness and effectiveness of MUSE, we perform two case studies based on real-world music MIDI data. In addition, we conduct a quantitative user study and an expert evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualization has the capacity of converting auditory perceptions of music into visual perceptions, which consequently opens the door to music visualization (e.g., exploring group style transitions and analyzing performance details). Current research either focuses on low-level analysis without constructing and comparing music group characteristics, or concentrates on high-level group analysis without analyzing and exploring detailed information. To fill this gap, integrating the high-level group analysis and low-level details exploration of music, we design a musical semantic sequence visualization analytics prototype system (MUSE) that mainly combines a distribution view and a semantic detail view, assisting analysts in obtaining the group characteristics and detailed interpretation. In the MUSE, we decompose the music into note sequences for modeling and abstracting music into three progressively fine-grained pieces of information (i.e., genres, instruments and notes). The distribution view integrates a new density contour, which considers sequence distance and semantic similarity, and helps analysts quickly identify the distribution features of the music group. The semantic detail view displays the music note sequences and combines the window moving to avoid visual clutter while ensuring the presentation of complete semantic details. To prove the usefulness and effectiveness of MUSE, we perform two case studies based on real-world music MIDI data. In addition, we conduct a quantitative user study and an expert evaluation.",
"title": "MUSE: Visual Analysis of Musical Semantic Sequence",
"normalizedTitle": "MUSE: Visual Analysis of Musical Semantic Sequence",
"fno": "09781257",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Music",
"Semantics",
"Visualization",
"Data Visualization",
"Instruments",
"Data Models",
"Sequences",
"Musical Semantic Sequence",
"Semantic Analysis",
"Temporal Sequence",
"Feature Extraction"
],
"authors": [
{
"givenName": "Baofeng",
"surname": "Chang",
"fullName": "Baofeng Chang",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, 12624 Hangzhou, Zhejiang, China, 310014",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guodao",
"surname": "Sun",
"fullName": "Guodao Sun",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong",
"surname": "Li",
"fullName": "Tong Li",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, 12624 Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Houchao",
"surname": "Huang",
"fullName": "Houchao Huang",
"affiliation": "College of Information Engineering, Zhejiang University of Technology, 12624 Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ronghua",
"surname": "Liang",
"fullName": "Ronghua Liang",
"affiliation": "College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, Zhejiang, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/aciids/2009/3580/0/3580a167",
"title": "A Similar Music Retrieval Scheme Based on Musical Mood Variation",
"doi": null,
"abstractUrl": "/proceedings-article/aciids/2009/3580a167/12OmNBkxsv6",
"parentPublication": {
"id": "proceedings/aciids/2009/3580/0",
"title": "Intelligent Information and Database Systems, Asian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/grc/2014/5464/0/06982846",
"title": "Semantic content-based music retrieval using audio and fuzzy-music-sense features",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2014/06982846/12OmNBrDqEL",
"parentPublication": {
"id": "proceedings/grc/2014/5464/0",
"title": "2014 IEEE International Conference on Granular Computing (GrC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2015/7568/0/7568a056",
"title": "A Color-Based Visualization Approach to Understand Harmonic Structures of Musical Compositions",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a056/12OmNvjyxYk",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2010/4191/1/4191a699",
"title": "Muzk Mesh: Interlinking Semantic Music Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2010/4191a699/12OmNvo67An",
"parentPublication": {
"id": "proceedings/wi-iat/2010/4191/1",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccc/2018/7241/0/724101a106",
"title": "MUSE Prototype for Music Sentiment Expression",
"doi": null,
"abstractUrl": "/proceedings-article/iccc/2018/724101a106/13xI8A0ZNjm",
"parentPublication": {
"id": "proceedings/iccc/2018/7241/0",
"title": "2018 IEEE International Conference on Cognitive Computing (ICCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a471",
"title": "MixMash: A Visualisation System for Musical Mashup Creation",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a471/17D45XvMcd9",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmrp/2019/1649/0/08665378",
"title": "Semantic Web Technology for New Experiences Throughout the Music Production-Consumption Chain",
"doi": null,
"abstractUrl": "/proceedings-article/mmrp/2019/08665378/18qc9CbHT1e",
"parentPublication": {
"id": "proceedings/mmrp/2019/1649/0",
"title": "2019 International Workshop on Multilayer Music Representation and Processing (MMRP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a352",
"title": "Visualizing the Semantics of Music",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a352/1cMFaBDrMKA",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2021/01/09302731",
"title": "ClaviNet: Generate Music With Different Musical Styles",
"doi": null,
"abstractUrl": "/magazine/mu/2021/01/09302731/1pLFuFNNoxW",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a372",
"title": "Smart Portable Musical Simulation System Based on Unified Temperament",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a372/1xPspwqBDUs",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09779957",
"articleId": "1DBTD2uB4di",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09783067",
"articleId": "1DIwTDMm7Mk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
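Per the abstract above, MUSE's distribution view builds a density contour that considers both sequence distance and semantic similarity. The sketch below implements only two generic ingredients such a view could build on, an edit distance between note sequences and a Gaussian kernel density over projected 2-D positions; the semantic-similarity weighting and the actual projection are omitted, and all names here are hypothetical.

```python
# Generic ingredients for a sequence-distance-based density view.
import numpy as np

def levenshtein(a, b) -> int:
    """Edit distance between two note sequences (lists of MIDI pitches)."""
    dp = np.arange(len(b) + 1)
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1,          # deletion
                                     dp[j - 1] + 1,      # insertion
                                     prev + (ca != cb))  # substitution
    return int(dp[-1])

def density(points: np.ndarray, grid: np.ndarray, bandwidth=0.5) -> np.ndarray:
    """Gaussian kernel density of projected pieces, evaluated on grid points."""
    d2 = ((grid[:, None, :] - points[None, :, :]) ** 2).sum(-1)
    return np.exp(-d2 / (2.0 * bandwidth ** 2)).sum(axis=1)

print(levenshtein([60, 62, 64], [60, 64, 65]))       # 2 edits
pieces_2d = np.random.rand(10, 2)                    # projected note sequences
print(density(pieces_2d, np.array([[0.5, 0.5]])))
```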
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DBTD2uB4di",
"doi": "10.1109/TVCG.2022.3176832",
"abstract": "Six degrees-of-freedom (6-DoF) video provides telepresence by enabling users to move around in the captured scene with a wide field of regard. Compared to methods requiring sophisticated camera setups, the image-based rendering method based on photogrammetry can work with images captured with any poses, which is more suitable for casual users. However, existing image-based-rendering methods are based on perspective images. When used to reconstruct 6-DoF views, it often requires capturing hundreds of images, making data capture a tedious and time-consuming process. In contrast to traditional perspective images, 360° images capture the entire surrounding view in a single shot, thus, providing a faster capturing process for 6-DoF view reconstruction. This paper presents a novel method to provide 6-DoF experiences over a wide area using an unstructured collection of 360° panoramas captured by a conventional 360° camera. Our method consists of 360° data capturing, novel depth estimation to produce a high-quality spherical depth panorama, and high-fidelity free-viewpoint generation. We compared our method against state-of-the-art methods, using data captured in various environments. Our method shows better visual quality and robustness in the tested scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Six degrees-of-freedom (6-DoF) video provides telepresence by enabling users to move around in the captured scene with a wide field of regard. Compared to methods requiring sophisticated camera setups, the image-based rendering method based on photogrammetry can work with images captured with any poses, which is more suitable for casual users. However, existing image-based-rendering methods are based on perspective images. When used to reconstruct 6-DoF views, it often requires capturing hundreds of images, making data capture a tedious and time-consuming process. In contrast to traditional perspective images, 360° images capture the entire surrounding view in a single shot, thus, providing a faster capturing process for 6-DoF view reconstruction. This paper presents a novel method to provide 6-DoF experiences over a wide area using an unstructured collection of 360° panoramas captured by a conventional 360° camera. Our method consists of 360° data capturing, novel depth estimation to produce a high-quality spherical depth panorama, and high-fidelity free-viewpoint generation. We compared our method against state-of-the-art methods, using data captured in various environments. Our method shows better visual quality and robustness in the tested scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Six degrees-of-freedom (6-DoF) video provides telepresence by enabling users to move around in the captured scene with a wide field of regard. Compared to methods requiring sophisticated camera setups, the image-based rendering method based on photogrammetry can work with images captured with any poses, which is more suitable for casual users. However, existing image-based-rendering methods are based on perspective images. When used to reconstruct 6-DoF views, it often requires capturing hundreds of images, making data capture a tedious and time-consuming process. In contrast to traditional perspective images, 360° images capture the entire surrounding view in a single shot, thus, providing a faster capturing process for 6-DoF view reconstruction. This paper presents a novel method to provide 6-DoF experiences over a wide area using an unstructured collection of 360° panoramas captured by a conventional 360° camera. Our method consists of 360° data capturing, novel depth estimation to produce a high-quality spherical depth panorama, and high-fidelity free-viewpoint generation. We compared our method against state-of-the-art methods, using data captured in various environments. Our method shows better visual quality and robustness in the tested scenes.",
"title": "Casual 6-DoF: free-viewpoint panorama using a handheld 360° camera",
"normalizedTitle": "Casual 6-DoF: free-viewpoint panorama using a handheld 360° camera",
"fno": "09779957",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Estimation",
"Image Reconstruction",
"Rendering Computer Graphics",
"Media",
"Real Time Systems",
"Navigation",
"Do F",
"Reference View Synthesis",
"Free Viewpoint Images",
"Panoramic Depth Estimation"
],
"authors": [
{
"givenName": "Rongsen",
"surname": "Chen",
"fullName": "Rongsen Chen",
"affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, 8491 Wellington, TeAro, New Zealand, 6140",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fang-Lue",
"surname": "Zhang",
"fullName": "Fang-Lue Zhang",
"affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, 8491 Wellington, Wellington, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Simon",
"surname": "Finnie",
"fullName": "Simon Finnie",
"affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, 8491 Wellington, Te Aro, New Zealand",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andrew",
"surname": "Chalmers",
"fullName": "Andrew Chalmers",
"affiliation": "Engineering and Computer Science, Victoria University of Wellington, Wellington, Wellington, New Zealand, 5014",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Taehyun",
"surname": "Rhee",
"fullName": "Taehyun Rhee",
"affiliation": "School of Engineering and Computer Science, Victoria University of Wellington, Wellington, Wellington, New Zealand, 6140",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892229",
"title": "6-DOF VR videos with a single 360-camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892229/12OmNAlvHtF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260946",
"title": "The Effect of Transition Type in Multi-View 360° Media",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260946/13rRUxly8T4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260916",
"title": "Parallax360: Stereoscopic 360° Scene Representation for Head-Motion Parallax",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260916/13rRUyp7tX1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551577",
"title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360° Video Network Multicast",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651483",
"title": "MegaParallax: Casual 360° Panoramas with Motion Parallax",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651483/17WX571UbUk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08661657",
"title": "Motion parallax for 360° RGBD video",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08661657/18bmQqdj3Nu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798142",
"title": "Novel View Synthesis with Multiple 360 Images for Large-Scale 6-DOF Virtual Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798142/1cJ0QJtmuVW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797922",
"title": "Freely Explore the Scene with 360° Field of View",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797922/1cJ0ZPNl4Qg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798067",
"title": "OmniMR: Omnidirectional Mixed Reality with Spatially-Varying Environment Reflections from Moving 360° Video Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798067/1cJ1cnBEFb2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093582",
"title": "360 Panorama Synthesis from a Sparse Set of Images with Unknown Field of View",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093582/1jPbrcnBX8s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09780012",
"articleId": "1DBTCPd8JS8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09781257",
"articleId": "1DDydDu6nPW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DGRZkfVzW0",
"name": "ttg555501-09779957s1-supp1-3176832.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09779957s1-supp1-3176832.pdf",
"extension": "pdf",
"size": "9.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
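A core intermediate product of the Casual 6-DoF pipeline above is a spherical depth panorama. One step any such pipeline needs is converting equirectangular pixels into ray directions so per-pixel depth can be lifted to a 3-D point cloud. The sketch below does exactly that under assumed conventions (y-up, longitude spanning -π..π), which may differ from the paper's.

```python
# Equirectangular panorama pixels -> unit ray directions -> 3-D points.
import numpy as np

def equirect_rays(width: int, height: int) -> np.ndarray:
    """Unit ray directions for every pixel, shape (height, width, 3)."""
    u = (np.arange(width) + 0.5) / width        # [0,1) across longitude
    v = (np.arange(height) + 0.5) / height      # [0,1) down latitude
    lon = (u - 0.5) * 2.0 * np.pi               # -pi .. pi
    lat = (0.5 - v) * np.pi                     # +pi/2 (top) .. -pi/2 (bottom)
    lon, lat = np.meshgrid(lon, lat)
    x = np.cos(lat) * np.sin(lon)
    y = np.sin(lat)
    z = np.cos(lat) * np.cos(lon)
    return np.stack([x, y, z], axis=-1)

depth = np.random.rand(256, 512).astype(np.float32) * 10.0  # metres (dummy)
points = equirect_rays(512, 256) * depth[..., None]         # (256, 512, 3)
print(points.shape)
```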
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DBTCPd8JS8",
"doi": "10.1109/TVCG.2022.3176958",
"abstract": "Diminished Reality (DR) propagates pixels from a keyframe to subsequent frames for real-time inpainting. Keyframe selection has a significant impact on the inpainting quality, but untrained users struggle to identify good keyframes. Automatic selection is not straightforward either, since no previous work has formalized or verified what determines a good keyframe. We propose a novel metric to select good keyframes to inpaint. We examine the heuristics adopted in existing DR inpainting approaches and derive multiple simple criteria measurable from SLAM. To combine these criteria, we empirically analyze their effect on the quality using a novel representative test dataset. Our results demonstrate that the combined metric selects RGBD keyframes leading to high-quality inpainting results more often than a baseline approach in both color and depth domains. Also, we confirmed that our approach has a better ranking ability of distinguishing good and bad keyframes. Compared to random selections, our metric selects keyframes that would lead to higher-quality and more stably converging inpainting results. We present three DR examples, automatic keyframe selection, user navigation, and marker hiding.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Diminished Reality (DR) propagates pixels from a keyframe to subsequent frames for real-time inpainting. Keyframe selection has a significant impact on the inpainting quality, but untrained users struggle to identify good keyframes. Automatic selection is not straightforward either, since no previous work has formalized or verified what determines a good keyframe. We propose a novel metric to select good keyframes to inpaint. We examine the heuristics adopted in existing DR inpainting approaches and derive multiple simple criteria measurable from SLAM. To combine these criteria, we empirically analyze their effect on the quality using a novel representative test dataset. Our results demonstrate that the combined metric selects RGBD keyframes leading to high-quality inpainting results more often than a baseline approach in both color and depth domains. Also, we confirmed that our approach has a better ranking ability of distinguishing good and bad keyframes. Compared to random selections, our metric selects keyframes that would lead to higher-quality and more stably converging inpainting results. We present three DR examples, automatic keyframe selection, user navigation, and marker hiding.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Diminished Reality (DR) propagates pixels from a keyframe to subsequent frames for real-time inpainting. Keyframe selection has a significant impact on the inpainting quality, but untrained users struggle to identify good keyframes. Automatic selection is not straightforward either, since no previous work has formalized or verified what determines a good keyframe. We propose a novel metric to select good keyframes to inpaint. We examine the heuristics adopted in existing DR inpainting approaches and derive multiple simple criteria measurable from SLAM. To combine these criteria, we empirically analyze their effect on the quality using a novel representative test dataset. Our results demonstrate that the combined metric selects RGBD keyframes leading to high-quality inpainting results more often than a baseline approach in both color and depth domains. Also, we confirmed that our approach has a better ranking ability of distinguishing good and bad keyframes. Compared to random selections, our metric selects keyframes that would lead to higher-quality and more stably converging inpainting results. We present three DR examples, automatic keyframe selection, user navigation, and marker hiding.",
"title": "Good Keyframes to Inpaint",
"normalizedTitle": "Good Keyframes to Inpaint",
"fno": "09780012",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement",
"Cameras",
"Three Dimensional Displays",
"Simultaneous Localization And Mapping",
"Real Time Systems",
"Image Reconstruction",
"Streaming Media",
"Diminished Reality",
"Inpainting",
"Keyframe",
"Good Keyframes To Inpaint",
"SLAM"
],
"authors": [
{
"givenName": "Shohei",
"surname": "Mori",
"fullName": "Shohei Mori",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, 27253 Graz, Steiermark, Austria, 8010",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, 27253 Graz, Steiermark, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Kalkofen",
"fullName": "Denis Kalkofen",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, 27253 Graz, Steiermark, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2016/0641/0/07477633",
"title": "Mono camera multi-view diminished reality",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477633/12OmNvKePGX",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671783",
"title": "Handling pure camera rotation in keyframe-based SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671783/12OmNvmG7YF",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169765",
"title": "What are the salient keyframes in short casual videos? an extensive user study using a new video dataset",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169765/12OmNwxlrdP",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/06912003",
"title": "Real-Time RGB-D Camera Relocalization via Randomized Ferns for Keyframe Encoding",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/06912003/13rRUwInvsU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404531",
"title": "Global Localization from Monocular SLAM on a Mobile Phone",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404531/13rRUwdrdSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c560",
"title": "CodeSLAM - Learning a Compact, Optimisable Representation for Dense Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c560/17D45VUZMVf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a308",
"title": "Online Adaptive Integration of Observation and Inpainting for Diminished Reality with Online Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a308/1J7Wkijm8Yo",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a594",
"title": "On the Redundancy Detection in Keyframe-Based SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a594/1ezRCsrH9Be",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/4.803E306",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/10/09184389",
"title": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2020/10/09184389/1mLIesC5z0Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09779066",
"articleId": "1DvgD0GMunm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09779957",
"articleId": "1DBTD2uB4di",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
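The record above describes its method only at a high level: derive several keyframe criteria that SLAM can measure, combine them into a single score, and pick the best-scoring keyframe. The sketch below illustrates that shape of computation; the specific criteria (sharpness, distance to the target, viewing angle, staleness) and the weights are invented assumptions for illustration, not the metric derived in the paper, which determines its combination empirically on a test dataset.

```python
# Hypothetical sketch of SLAM-based keyframe scoring for DR inpainting.
# Criterion names and weights are illustrative placeholders, NOT the
# empirically derived metric from the paper.
from dataclasses import dataclass
import numpy as np

@dataclass
class Keyframe:
    pose: np.ndarray      # 4x4 camera-to-world transform from SLAM
    timestamp: float      # capture time in seconds
    blur_score: float     # e.g., variance of Laplacian; higher = sharper

def view_angle(kf: Keyframe, target: np.ndarray) -> float:
    """Angle (rad) between the camera's optical axis and the target point."""
    cam_pos = kf.pose[:3, 3]
    optical_axis = kf.pose[:3, 2]            # camera z-axis in world coords
    to_target = target - cam_pos
    to_target = to_target / np.linalg.norm(to_target)
    return float(np.arccos(np.clip(optical_axis @ to_target, -1.0, 1.0)))

def score(kf: Keyframe, target: np.ndarray, now: float) -> float:
    """Combine simple SLAM-measurable criteria into one keyframe score.

    A weighted sum of per-criterion terms; the weights below are made-up
    stand-ins for whatever combination an empirical analysis would pick.
    """
    distance = np.linalg.norm(kf.pose[:3, 3] - target)
    angle = view_angle(kf, target)
    staleness = now - kf.timestamp
    return (0.4 * kf.blur_score
            - 0.3 * distance
            - 0.2 * angle
            - 0.1 * staleness)

def select_keyframe(keyframes, target, now):
    """Return the keyframe with the highest combined score."""
    return max(keyframes, key=lambda kf: score(kf, target, now))
```

A weighted sum is only one plausible way to fuse the criteria; the point of the sketch is the overall pipeline (per-keyframe criteria from SLAM state, one scalar score, argmax selection), not the particular functional form.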
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1DvgD0GMunm",
"doi": "10.1109/TVCG.2022.3175626",
"abstract": "Physicians work at a very tight schedule and need decision-making support tools to help on improving and doing their work in a timely and dependable manner. Examining piles of sheets with test results and using systems with little visualization support to provide diagnostics is daunting, but that is still the usual way for the physicians' daily procedure, especially in developing countries. Electronic Health Records systems have been designed to keep the patients' history and reduce the time spent analyzing the patient's data. However, better tools to support decision-making are still needed. In this paper, we propose ClinicalPath, a visualization tool for users to track a patient's clinical path through a series of tests and data, which can aid in treatments and diagnoses. Our proposal is focused on patient's data analysis, presenting the test results and clinical history longitudinally. Both the visualization design and the system functionality were developed in close collaboration with experts in the medical domain to ensure a right fit of the technical solutions and the real needs of the professionals. We validated the proposed visualization based on case studies and user assessments through tasks based on the physician's daily activities. Our results show that our proposed system improves the physicians' experience in decision-making tasks, made with more confidence and better usage of the physicians' time, allowing them to take other needed care for the patients.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Physicians work at a very tight schedule and need decision-making support tools to help on improving and doing their work in a timely and dependable manner. Examining piles of sheets with test results and using systems with little visualization support to provide diagnostics is daunting, but that is still the usual way for the physicians' daily procedure, especially in developing countries. Electronic Health Records systems have been designed to keep the patients' history and reduce the time spent analyzing the patient's data. However, better tools to support decision-making are still needed. In this paper, we propose ClinicalPath, a visualization tool for users to track a patient's clinical path through a series of tests and data, which can aid in treatments and diagnoses. Our proposal is focused on patient's data analysis, presenting the test results and clinical history longitudinally. Both the visualization design and the system functionality were developed in close collaboration with experts in the medical domain to ensure a right fit of the technical solutions and the real needs of the professionals. We validated the proposed visualization based on case studies and user assessments through tasks based on the physician's daily activities. Our results show that our proposed system improves the physicians' experience in decision-making tasks, made with more confidence and better usage of the physicians' time, allowing them to take other needed care for the patients.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Physicians work at a very tight schedule and need decision-making support tools to help on improving and doing their work in a timely and dependable manner. Examining piles of sheets with test results and using systems with little visualization support to provide diagnostics is daunting, but that is still the usual way for the physicians' daily procedure, especially in developing countries. Electronic Health Records systems have been designed to keep the patients' history and reduce the time spent analyzing the patient's data. However, better tools to support decision-making are still needed. In this paper, we propose ClinicalPath, a visualization tool for users to track a patient's clinical path through a series of tests and data, which can aid in treatments and diagnoses. Our proposal is focused on patient's data analysis, presenting the test results and clinical history longitudinally. Both the visualization design and the system functionality were developed in close collaboration with experts in the medical domain to ensure a right fit of the technical solutions and the real needs of the professionals. We validated the proposed visualization based on case studies and user assessments through tasks based on the physician's daily activities. Our results show that our proposed system improves the physicians' experience in decision-making tasks, made with more confidence and better usage of the physicians' time, allowing them to take other needed care for the patients.",
"title": "ClinicalPath: a Visualization tool to Improve the Evaluation of Electronic Health Records in Clinical Decision-Making",
"normalizedTitle": "ClinicalPath: a Visualization tool to Improve the Evaluation of Electronic Health Records in Clinical Decision-Making",
"fno": "09779066",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Task Analysis",
"Medical Services",
"History",
"Visualization",
"Medical Diagnostic Imaging",
"Decision Making",
"Information Visualization",
"Interactive Visualizations",
"Human Computer Interaction",
"Electronic Health Records"
],
"authors": [
{
"givenName": "Claudio D. G.",
"surname": "Linhares",
"fullName": "Claudio D. G. Linhares",
"affiliation": "Institute of Mathematics and Computer Sciences, University of Sao Paulo Campus of Sao Carlos, 42512 Sao Carlos, São Paulo, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel M.",
"surname": "Lima",
"fullName": "Daniel M. Lima",
"affiliation": "Institute of Mathematics and Computer Sciences, University of Sao Paulo Campus of Sao Carlos, 42512 Sao Carlos, São Paulo, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean R.",
"surname": "Ponciano",
"fullName": "Jean R. Ponciano",
"affiliation": "School of Applied Mathematics, Fundacao Getulio Vargas, 42500 Rio de Janeiro, RJ, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mauro M.",
"surname": "Olivatto",
"fullName": "Mauro M. Olivatto",
"affiliation": "Graduate Course in Medicine, Universidade Federal da Fronteira Sul, 232192 Chapeco, Santa Catarina, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marco A.",
"surname": "Gutierrez",
"fullName": "Marco A. Gutierrez",
"affiliation": "Laboratorio de Informatica Biomedica, Universidade de Sao Paulo Instituto do Coracao, 42523 Sao Paulo, São Paulo, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorge",
"surname": "Poco",
"fullName": "Jorge Poco",
"affiliation": "School of Applied Mathematics, Fundacao Getulio Vargas, 42500 Rio de Janeiro, Rio de Janeiro, Brazil, 22250-900",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caetano",
"surname": "Traina",
"fullName": "Caetano Traina",
"affiliation": "Computer Science, University of Sao Paulo Campus of Sao Carlos, 42512 Sao Carlos, São Paulo, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Agma Juci Machado",
"surname": "Traina",
"fullName": "Agma Juci Machado Traina",
"affiliation": "Institute of Mathematics and Computer Sciences, University of Sao Paulo, Sao Carlos, Brazil.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2001/1423/0/00974425",
"title": "Medical decision-making and collaborative reasoning",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2001/00974425/12OmNqGRGdS",
"parentPublication": {
"id": "proceedings/bibe/2001/1423/0",
"title": "Proceedings 2nd Annual IEEE International Symposium on Bioinformatics and Bioengineering (BIBE 2001)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120478",
"title": "Preserving Narratives in Electronic Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120478/12OmNvqEvIv",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2016/0662/0/0662a321",
"title": "Visualization of Pain Severity Events in Clinical Records Using Semantic Structures",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a321/12OmNwB2dUS",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2014/4435/0/4435a497",
"title": "Need and Requirements Elicitation for Electronic Access to Patient's Medication History in the Emergency Department",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2014/4435a497/12OmNwEJ13T",
"parentPublication": {
"id": "proceedings/cbms/2014/4435/0",
"title": "2014 IEEE 27th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2017/3187/0/08387501",
"title": "A timeline-based framework for aggregating and summarizing electronic health records",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2017/08387501/12OmNyYDDIU",
"parentPublication": {
"id": "proceedings/vahc/2017/3187/0",
"title": "2017 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2011/0868/0/06004004",
"title": "Adaptive Visual Symbols for Personal Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004004/12OmNzVGcC4",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scamc/1983/0503/0/00764817",
"title": "Using experience to improve clinical decision making",
"doi": null,
"abstractUrl": "/proceedings-article/scamc/1983/00764817/12OmNzX6cgy",
"parentPublication": {
"id": "proceedings/scamc/1983/0503/0",
"title": "1983 The Seventh Annual Symposium on Computer Applications in Medical Care",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2013/06/msp2013060012",
"title": "Nonconfidential Patient Types in Emergency Clinical Decision Support",
"doi": null,
"abstractUrl": "/magazine/sp/2013/06/msp2013060012/13rRUwbaqK1",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440832",
"title": "<italic>Doccurate</italic>: A Curation-Based Approach for Clinical Text Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440832/17D45WHONqh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a392",
"title": "Visualization of Histopathological Decision Making Using a Roadbook Metaphor",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a392/1cMF8urE6vS",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09779102",
"articleId": "1DvgCK2YAyQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09780012",
"articleId": "1DBTCPd8JS8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1DwUCNxldcs",
"name": "ttg555501-09779066s1-supp1-3175626.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09779066s1-supp1-3175626.pdf",
"extension": "pdf",
"size": "190 kB",
"__typename": "WebExtraType"
},
{
"id": "1DwUCFIwl2g",
"name": "ttg555501-09779066s1-supp2-3175626.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09779066s1-supp2-3175626.mp4",
"extension": "mp4",
"size": "45 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
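ClinicalPath's core idea, per the abstract above, is a longitudinal presentation: each test gets its own row and the patient's results are plotted against time so that trends are visible at a glance during decision-making. The toy sketch below illustrates that layout only; the patient data, test names, reference values, and plotting choices are all invented for the example and are not the ClinicalPath implementation.

```python
# Toy longitudinal view: one row per test, results plotted over time.
# All data and labels here are fabricated for illustration.
import matplotlib.pyplot as plt
from datetime import date

records = {  # test name -> list of (date, value) observations
    "Hemoglobin (g/dL)": [(date(2022, 1, 5), 13.1), (date(2022, 3, 2), 12.4),
                          (date(2022, 5, 9), 11.8)],
    "Creatinine (mg/dL)": [(date(2022, 1, 5), 0.9), (date(2022, 3, 2), 1.1),
                           (date(2022, 5, 9), 1.4)],
}

fig, axes = plt.subplots(len(records), 1, sharex=True, figsize=(6, 4))
for ax, (test, series) in zip(axes, records.items()):
    dates, values = zip(*sorted(series))        # chronological order
    ax.plot(dates, values, marker="o")
    ax.set_ylabel(test, rotation=0, ha="right", fontsize=8)
fig.suptitle("Longitudinal test history for one patient")
fig.autofmt_xdate()                             # slant the date labels
plt.show()
```

Sharing one time axis across all test rows is what makes co-occurring changes (e.g., two values drifting in the same period) easy to spot, which is the kind of pattern the tool is meant to surface for clinicians.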