From 2cfc47476ec464fb1192d418b6963fda2dea8deb Mon Sep 17 00:00:00 2001
From: zw <26880977+zw615@users.noreply.github.com>
Date: Mon, 17 Jun 2024 14:51:06 +0800
Subject: [PATCH] Fix bio grammar and update reviewer list (2024/06/16, 2)
---
authors/admin/index.html | 6 +++---
index.html | 15 +++++++++++----
index.json | 2 +-
3 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/authors/admin/index.html b/authors/admin/index.html
index d65c085..fb72ae0 100755
--- a/authors/admin/index.html
+++ b/authors/admin/index.html
@@ -599,7 +599,7 @@
Prof. Cihang Xie.
-Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021,
+Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Sciences in 2021,
and my B.E. degree at Central South University in 2018.
-I am a incoming intern at
+I am an incoming intern at
LLNL.
-have also spent wonderful times at
+I have also spent wonderful times at
QCraft and
diff --git a/index.html b/index.html
index f9ae9f8..2f23129 100755
--- a/index.html
+++ b/index.html
@@ -650,7 +650,7 @@
Prof. Cihang Xie.
-Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021,
+Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Sciences in 2021,
and my B.E. degree at Central South University in 2018.
-I am a incoming intern at
+I am an incoming intern at
LLNL.
-have also spent wonderful times at
+I have also spent wonderful times at
QCraft and
@@ -2326,7 +2326,14 @@ Teaching Assistant
Reviewer
-- AAAI 2022, NeuRIPS 2022-2023, ICLR2023-2024, CVPR2023-2024, ICML2023, ICCV2023, WACV2024, TPAMI, TCSVT
+- Conference reviewer: AAAI 2022, NeurIPS 2022-2024, ICLR 2023-2024, CVPR 2023-2024, ICML 2023-2024, ICCV 2023, WACV 2024
+- Journal reviewer:
+
+- IEEE Transactions on Pattern Analysis and Machine Intelligence
+- International Journal of Computer Vision
+- IEEE Transactions on Circuits and Systems for Video Technology
+
+
diff --git a/index.json b/index.json
index 75910b9..798ebb2 100755
--- a/index.json
+++ b/index.json
@@ -1 +1 @@
-[{"authors":["admin"],"categories":null,"content":"I am a 3rd-year PhD student at UC Santa Cruz, supervised by Prof. Cihang Xie. Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021, and my B.E. degree at Central South University in 2018. I am a incoming intern at LLNL. have also spent wonderful times at QCraft and Megvii (Face++).\nMy research interest lies in the intersection of computer vision and machine learning.\n","date":-62135596800,"expirydate":-62135596800,"kind":"taxonomy","lang":"en","lastmod":1705214261,"objectID":"2525497d367e79493fd32b198b28f040","permalink":"https://zw615.github.io/authors/admin/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/authors/admin/","section":"authors","summary":"I am a 3rd-year PhD student at UC Santa Cruz, supervised by Prof. Cihang Xie. Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021, and my B.E. degree at Central South University in 2018. I am a incoming intern at LLNL. have also spent wonderful times at QCraft and Megvii (Face++).\nMy research interest lies in the intersection of computer vision and machine learning.","tags":null,"title":"Zeyu Wang","type":"authors"},{"authors":["**Zeyu Wang***","Xianhang Li*","Hongru Zhu","Cihang Xie"],"categories":[],"content":" ","date":1704758400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"7806ab942d186cb1c4a0c898538c29e8","permalink":"https://zw615.github.io/publication/advxl/","publishdate":"2024-01-09T00:00:00Z","relpermalink":"/publication/advxl/","section":"publication","summary":" ","tags":[],"title":"Revisiting Adversarial Training at Scale","type":"publication"},{"authors":["Sucheng Ren*","**Zeyu Wang***","Hongru Zhu","Junfei Xiao","Alan Yuille","Cihang Xie"],"categories":[],"content":" ","date":1701648000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"a01217a018796f75693d2af369aa6c06","permalink":"https://zw615.github.io/publication/digpt/","publishdate":"2023-12-04T00:00:00Z","relpermalink":"/publication/digpt/","section":"publication","summary":" ","tags":[],"title":"Rejuvenating image-GPT as Strong Visual Representation Learners","type":"publication"},{"authors":["Yipeng Gao","**Zeyu Wang**","Wei-Shi Zheng","Cihang Xie","Yuyin Zhou"],"categories":[],"content":" ","date":1698969600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1698969600,"objectID":"4753f81cd0f271d2202d48b83532c76a","permalink":"https://zw615.github.io/publication/mixcon3d/","publishdate":"2023-11-03T00:00:00Z","relpermalink":"/publication/mixcon3d/","section":"publication","summary":" ","tags":[],"title":"Sculpting Holistic 3D Representation in Contrastive Language-Image-3D Pre-training","type":"publication"},{"authors":["Peiran Xu*","**Zeyu Wang***","Jieru Mei","Liangqiong Qu","Alan Yuille","Cihang Xie","Yuyin Zhou"],"categories":[],"content":" ","date":1696550400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1696550400,"objectID":"604c2da5d1fb768b8f281d488013de5f","permalink":"https://zw615.github.io/publication/fedconv/","publishdate":"2023-10-06T00:00:00Z","relpermalink":"/publication/fedconv/","section":"publication","summary":" ","tags":[],"title":"FedConv: Enhancing Convolutional Neural Networks for Handling Data Heterogeneity in Federated Learning","type":"publication"},{"authors":["**Zeyu Wang***","Dingwen Li*","Chenxu Luo","Cihang Xie","Xiaodong Yang"],"categories":[],"content":" 
","date":1695686400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"cc3fdf098f73d9ab8f1e2dacc85c102f","permalink":"https://zw615.github.io/publication/distillbev/","publishdate":"2023-09-26T00:00:00Z","relpermalink":"/publication/distillbev/","section":"publication","summary":" ","tags":[],"title":"DistillBEV: Boosting Multi-Camera 3D Object Detection with Cross-Modal Knowledge Distillation","type":"publication"},{"authors":["Xianhang Li*","**Zeyu Wang***","Cihang Xie"],"categories":[],"content":" ","date":1683763200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"904c92b072bcc8161ca2b53d10a12792","permalink":"https://zw615.github.io/publication/clipa/","publishdate":"2023-05-11T00:00:00Z","relpermalink":"/publication/clipa/","section":"publication","summary":" ","tags":[],"title":"An Inverse Scaling Law for CLIP Training","type":"publication"},{"authors":["Shaoyuan Xie","Zichao Li","**Zeyu Wang**","Cihang Xie"],"categories":[],"content":" ","date":1674604800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"e83a35a1979d4259cc0833db461fbc44","permalink":"https://zw615.github.io/publication/bevrobustness/","publishdate":"2023-01-25T00:00:00Z","relpermalink":"/publication/bevrobustness/","section":"publication","summary":" ","tags":[],"title":"On the Adversarial Robustness of Camera-based 3D Object Detection","type":"publication"},{"authors":["Leilani H. Gilpin","Filip Ilievski"],"categories":null,"content":"","date":1664533800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"8c225d3756c1aa95aa62887ff12af214","permalink":"https://zw615.github.io/talk/us2ts-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/us2ts-2022/","section":"talk","summary":"US2TS KG and XAI tutorial with Filip Ilievski","tags":["tutorial"],"title":"Knowledge-based commonsense reasoning and explainability","type":"talk"},{"authors":["Leilani H. Gilpin","Razvan V. Marinescu"],"categories":null,"content":"","date":1662634800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"58058ca4bee201a9e671a4e280c82ffc","permalink":"https://zw615.github.io/talk/birmingham-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/birmingham-2022/","section":"talk","summary":"Joint School of Computer Science Seminar with Razvan Marinescu","tags":[],"title":"Accountability Layers","type":"talk"},{"authors":["Leilani H. Gilpin","Razvan V. 
Marinescu"],"categories":null,"content":"","date":1662512400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e80af49ed8c6bfb8703ce554df81fe6a","permalink":"https://zw615.github.io/talk/ucl-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/ucl-2022/","section":"talk","summary":"Joint CMIC/WEISS + AI Centre Joint Seminar with Razvan Marinescu","tags":[],"title":"Accountability Layers","type":"talk"},{"authors":["Yutong Bai","**Zeyu Wang**","Junfei Xiao","Chen Wei","Huiyu Wang","Alan Yuille","Yuyin Zhou","Cihang Xie"],"categories":[],"content":" ","date":1661385600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"ec04330db1479a868fe58166884cc43a","permalink":"https://zw615.github.io/publication/dmae/","publishdate":"2022-08-25T00:00:00Z","relpermalink":"/publication/dmae/","section":"publication","summary":" ","tags":[],"title":"Masked Autoencoders Enable Efficient Knowledge Distillers","type":"publication"},{"authors":["**Zeyu Wang**","Yutong Bai","Yuyin Zhou","Cihang Xie"],"categories":[],"content":" ","date":1654560000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"d98cdbd18281c902fb31f26985249c94","permalink":"https://zw615.github.io/publication/robustcnn/","publishdate":"2022-06-07T00:00:00Z","relpermalink":"/publication/robustcnn/","section":"publication","summary":" ","tags":[],"title":"Can CNNs Be More Robust Than Transformers?","type":"publication"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1637229000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e7aa206e92dbe580ceec42d0e54ea83e","permalink":"https://zw615.github.io/talk/cse-200-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cse-200-2021/","section":"talk","summary":"Research overview for CSE 200.","tags":[],"title":"Explaining Errors in Complex Systems","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1636588800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"dc2da9b96a213919c61f2d34547fe17d","permalink":"https://zw615.github.io/talk/jhu-case-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/jhu-case-2021/","section":"talk","summary":"Autonomous systems are prone to errors and failures without knowing why. In critical domains like driving, these autonomous counterparts must be able to recount their actions for safety, liability, and trust. An explanation: a model-dependent reason or justification for the decision of the autonomous agent being assessed, is a key component for post-mortem failure analysis, but also for pre-deployment verification. I will show a monitoring framework that uses a model and commonsense knowledge to detect and explain unreasonable vehicle scenarios, even if it has not seen that error before.\nIn the second part of the talk, I will motivate the explanations as a testing framework for autonomous systems. While it is important to develop realistic tests in simulation, simulation is not always representative of the corner cases in the real world. I will show how to use explanations in a feedback loop. The explanation ensures that the machine has done the right thing or it exploits a stressor to be modified and tested moving forward. 
I will conclude by discussing new challenges at the intersection of XAI and autonomy towards autonomous systems that are explainable by design","tags":[],"title":"Explaining Errors in Autonomous Driving: A Diagnosis Tool and Testing Framework for Robust Decision Making","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1636070400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"4f4f3b14f4bccd348efdeaef00c42076","permalink":"https://zw615.github.io/talk/cogsat-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cogsat-2021/","section":"talk","summary":"","tags":[],"title":"Perception Challenge for Autonomous Vehicles","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1635724800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"edd519cbd3bd3f8b2c25a9c8e54e97a2","permalink":"https://zw615.github.io/talk/recruiting/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/recruiting/","section":"talk","summary":"","tags":[],"title":"I'm Recruiting PhD Students","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1626220800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"962aa0612c86dbb0c43cd6d5f7a615d0","permalink":"https://zw615.github.io/talk/fuzz-ieee-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/fuzz-ieee-2021/","section":"talk","summary":"FUZZ-IEEE invited talk","tags":[],"title":"Anomaly Detection Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1620813000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"94e25239951e30de49c846db6186cc82","permalink":"https://zw615.github.io/talk/nw-xai-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/nw-xai-2021/","section":"talk","summary":"Guest Lecture in CS 496 (AI Perspectives)","tags":[],"title":"Explaining Explanations in AI","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1607385600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"27a61a3e41839c63d17b1a21aa934eeb","permalink":"https://zw615.github.io/talk/neurips-phd/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/neurips-phd/","section":"talk","summary":"In this talk, I present new methodologies for detecting and explaining errors in complex systems. My novel contribution is a system-wide monitoring architecture, which is composed of introspective, overlapping committees of subsystems. Each subsystem is encapsulated in a reasonableness monitor, an adaptable framework that supplements local decisions with commonsense data and reasonableness rules. This framework is dynamic and introspective: it allows each subsystem to defend its decisions in different contexts; to the committees it participates in and to itself. For reconciling system-wide errors, I developed a comprehensive architecture that I call Anomaly Detection through Explanations (ADE). The ADE architecture contributes an explanation synthesizer that produces an argument tree, which in turn can be traced and queried to determine the support of a decision, and to construct counterfactual explanations. 
I have applied this methodology to detect incorrect labels in semi-autonomous vehicle data, and to reconcile inconsistencies in simulated, anomalous driving scenarios.","tags":[],"title":"Identifying Multimodal Errors Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1591833600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"8db5f11e3817dfe3673ecbbfaa458ab5","permalink":"https://zw615.github.io/talk/defense/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/defense/","section":"talk","summary":"","tags":[],"title":"Anomaly Detection Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1588636800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"038805dac385c300618bb2e706da8f64","permalink":"https://zw615.github.io/talk/cs520-xai/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cs520-xai/","section":"talk","summary":"","tags":[],"title":"Explaining Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1583452800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"1c471eed23dce10746c42a55986adbb1","permalink":"https://zw615.github.io/talk/wids-2020/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/wids-2020/","section":"talk","summary":"","tags":[],"title":"Explanation-based Anomaly Detection","type":"talk"},{"authors":null,"categories":["recruiting"],"content":"I am seeking creative, hard-working, PhD students (and master\u0026rsquo;s students and undergraduates) to join the XAI group at UCSC.\nCome work with me on making complex mechanisms more understandable for debugging and diagnosis, accountability, and liability. We examine applications in autonomous driving, deep learning, and other mission-critical or safety-critical applications. Additionally, my work also examines the role of explanations for trustworthy ML, human-machine teams, AI and ethics, and AI safety and fairness.\nUCSC undergrad UCSC undergraduates are welcome to contact me about getting involved in research. Students interested in joining the group should have basic programming experience, and preferably, have taken CSE 140.\nUCSC Master\u0026rsquo;s students Current UCSC Master\u0026rsquo;s students interested in gaining research experience or looking for a supervisor for a research project may contact me.\nIn your introductory email, please include a CV/resume, and indicate a few topics or application domains of interest. I\u0026rsquo;ve listed a couple ongoing projects on the UCSC CSE master\u0026rsquo;s board.\nProspective PhD students I\u0026rsquo;m currently looking to build the XAI group with driven, hard-working PhD students. Students will work on various projects with a focus on autonomous systems, robust machine learning, and/or explainability and interpretability.\nIn your introductory email, please include a CV/resume, and indicate a few topics or application domains of interest. (For inspiration, some ongoing projects are listed here, but feel free to define your own!).\nBefore contacting Prospective PhD students, please review the UC Santa Cruz CSE PhD admissions requirements and ensure that you meet these requirements before contacting Prof. Gilpin. (Note: the GRE is no longer required!) 
I can only take on students that are accepted to a PhD program at UCSC.\nI\u0026rsquo;m unable to provide insights about admissions decisions. The admission decisions are made by a committee.\nDiversity statement Explanatory AI is an interdisciplinary field which benefits from the perspectives and contributions of those from diverse voices (diversity broadly defined). We encourage anyone who shares our interests, including members of historically underrepresented and marginalized groups, women, LGBTQ+ folks, and people with disabilities (with diversity broadly defined), to consider joining us at UC Santa Cruz!\n","date":1583325625,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"9622d7b23a4b35bfd66b060883eaf544","permalink":"https://zw615.github.io/post/recruiting/","publishdate":"2020-03-04T13:40:25+01:00","relpermalink":"/post/recruiting/","section":"post","summary":"I am seeking creative, hard-working, PhD students (and master\u0026rsquo;s students and undergraduates) to join the XAI group at UCSC.\nCome work with me on making complex mechanisms more understandable for debugging and diagnosis, accountability, and liability. We examine applications in autonomous driving, deep learning, and other mission-critical or safety-critical applications. Additionally, my work also examines the role of explanations for trustworthy ML, human-machine teams, AI and ethics, and AI safety and fairness.","tags":null,"title":"I'm Recruiting PhD students","type":"post"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1577836800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"10ebfafa4090e00b7332dc365e125b6e","permalink":"https://zw615.github.io/talk/csail-student-profile/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/csail-student-profile/","section":"talk","summary":"","tags":[],"title":"CSAIL Student Profile","type":"talk"},{"authors":["Yinda Xu*","**Zeyu Wang***","Zuoxin Li","Ye Yuan","Gang Yu"],"categories":[],"content":" ","date":1573689600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"166e891aab34c7e0a565b1b8347c20be","permalink":"https://zw615.github.io/publication/siamfc++/","publishdate":"2019-11-14T00:00:00Z","relpermalink":"/publication/siamfc++/","section":"publication","summary":" ","tags":[],"title":"Siamfc++: Towards robust and accurate visual tracking with target estimation guidelines","type":"publication"},{"authors":[],"categories":null,"content":"","date":1571835600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"60e8404d0ccc821b7a2d3328470d8e32","permalink":"https://zw615.github.io/talk/ucsd-datascience-2019/","publishdate":"2020-02-22T14:54:47-05:00","relpermalink":"/talk/ucsd-datascience-2019/","section":"talk","summary":"How to use explanations to make more trusted, accountable (and even ethical) decisions.","tags":[],"title":"Using Explanations for Robust Autonomous Decision Making","type":"talk"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"30eec81ebf82bde10e6d5e440a8e3a72","permalink":"https://zw615.github.io/project/ai-ethics/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/ai-ethics/","section":"project","summary":"The AI and ethics reading group is a student-lead, campus-wide initiative.","tags":[],"title":"AI and 
ethics","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"d70469283eb79839d3f81926cdef348c","permalink":"https://zw615.github.io/project/explanatory-games/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/explanatory-games/","section":"project","summary":"Using internal symbolic, explanatory representations to robustly monitor agents.","tags":[],"title":"Explanatory Games","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"f2a7de495cb71f5501384c1c6276ddd2","permalink":"https://zw615.github.io/project/reasonableness-monitors/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/reasonableness-monitors/","section":"project","summary":"An adaptable framework to supplement decision making systems with commonsense knowledge and reasonableness rules.","tags":[],"title":"Monitoring Decision Systems","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"c31658ef40e70de1eda68c21309afca4","permalink":"https://zw615.github.io/project/car-can-explain/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/car-can-explain/","section":"project","summary":"The methdologies and underlying technologies to allow self-driving cars and other AI-driven systems to explain behaviors and failures.","tags":[],"title":"The Car Can Explain!","type":"project"},{"authors":["Andrew Sinclair","Jeremiah Foster","Leilani H. Gilpin","Daniel Patnaik","moderated by Eben Moglen","and Mishi Choudhary"],"categories":null,"content":"","date":1541116800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"2b46c85d348a8d22d4f0462f29a89961","permalink":"https://zw615.github.io/talk/sflc-panel/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/sflc-panel/","section":"talk","summary":"","tags":["panel","autonomous driving"],"title":"Automotive FOSS Panel","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1523577600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e8a5cf051692900d834f371b373e5132","permalink":"https://zw615.github.io/talk/sflc-2018/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/sflc-2018/","section":"talk","summary":"","tags":[],"title":"Self-Explanation and Self-Driving","type":"talk"}]
\ No newline at end of file
+[{"authors":["admin"],"categories":null,"content":"I am a 3rd-year PhD student at UC Santa Cruz, supervised by Prof. Cihang Xie. Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021, and my B.E. degree at Central South University in 2018. I am an incoming intern at LLNL. have also spent wonderful times at QCraft and Megvii (Face++).\nMy research interest lies in the intersection of computer vision and machine learning.\n","date":-62135596800,"expirydate":-62135596800,"kind":"taxonomy","lang":"en","lastmod":1705214261,"objectID":"2525497d367e79493fd32b198b28f040","permalink":"https://zw615.github.io/authors/admin/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/authors/admin/","section":"authors","summary":"I am a 3rd-year PhD student at UC Santa Cruz, supervised by Prof. Cihang Xie. Before this, I received my M.S. degree at Institute of Computing Technology, Chinese Academy of Science in 2021, and my B.E. degree at Central South University in 2018. I am an incoming intern at LLNL. have also spent wonderful times at QCraft and Megvii (Face++).\nMy research interest lies in the intersection of computer vision and machine learning.","tags":null,"title":"Zeyu Wang","type":"authors"},{"authors":["**Zeyu Wang***","Xianhang Li*","Hongru Zhu","Cihang Xie"],"categories":[],"content":" ","date":1704758400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"7806ab942d186cb1c4a0c898538c29e8","permalink":"https://zw615.github.io/publication/advxl/","publishdate":"2024-01-09T00:00:00Z","relpermalink":"/publication/advxl/","section":"publication","summary":" ","tags":[],"title":"Revisiting Adversarial Training at Scale","type":"publication"},{"authors":["Sucheng Ren*","**Zeyu Wang***","Hongru Zhu","Junfei Xiao","Alan Yuille","Cihang Xie"],"categories":[],"content":" ","date":1701648000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"a01217a018796f75693d2af369aa6c06","permalink":"https://zw615.github.io/publication/digpt/","publishdate":"2023-12-04T00:00:00Z","relpermalink":"/publication/digpt/","section":"publication","summary":" ","tags":[],"title":"Rejuvenating image-GPT as Strong Visual Representation Learners","type":"publication"},{"authors":["Yipeng Gao","**Zeyu Wang**","Wei-Shi Zheng","Cihang Xie","Yuyin Zhou"],"categories":[],"content":" ","date":1698969600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1698969600,"objectID":"4753f81cd0f271d2202d48b83532c76a","permalink":"https://zw615.github.io/publication/mixcon3d/","publishdate":"2023-11-03T00:00:00Z","relpermalink":"/publication/mixcon3d/","section":"publication","summary":" ","tags":[],"title":"Sculpting Holistic 3D Representation in Contrastive Language-Image-3D Pre-training","type":"publication"},{"authors":["Peiran Xu*","**Zeyu Wang***","Jieru Mei","Liangqiong Qu","Alan Yuille","Cihang Xie","Yuyin Zhou"],"categories":[],"content":" ","date":1696550400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1696550400,"objectID":"604c2da5d1fb768b8f281d488013de5f","permalink":"https://zw615.github.io/publication/fedconv/","publishdate":"2023-10-06T00:00:00Z","relpermalink":"/publication/fedconv/","section":"publication","summary":" ","tags":[],"title":"FedConv: Enhancing Convolutional Neural Networks for Handling Data Heterogeneity in Federated Learning","type":"publication"},{"authors":["**Zeyu Wang***","Dingwen Li*","Chenxu Luo","Cihang Xie","Xiaodong 
Yang"],"categories":[],"content":" ","date":1695686400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"cc3fdf098f73d9ab8f1e2dacc85c102f","permalink":"https://zw615.github.io/publication/distillbev/","publishdate":"2023-09-26T00:00:00Z","relpermalink":"/publication/distillbev/","section":"publication","summary":" ","tags":[],"title":"DistillBEV: Boosting Multi-Camera 3D Object Detection with Cross-Modal Knowledge Distillation","type":"publication"},{"authors":["Xianhang Li*","**Zeyu Wang***","Cihang Xie"],"categories":[],"content":" ","date":1683763200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"904c92b072bcc8161ca2b53d10a12792","permalink":"https://zw615.github.io/publication/clipa/","publishdate":"2023-05-11T00:00:00Z","relpermalink":"/publication/clipa/","section":"publication","summary":" ","tags":[],"title":"An Inverse Scaling Law for CLIP Training","type":"publication"},{"authors":["Shaoyuan Xie","Zichao Li","**Zeyu Wang**","Cihang Xie"],"categories":[],"content":" ","date":1674604800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"e83a35a1979d4259cc0833db461fbc44","permalink":"https://zw615.github.io/publication/bevrobustness/","publishdate":"2023-01-25T00:00:00Z","relpermalink":"/publication/bevrobustness/","section":"publication","summary":" ","tags":[],"title":"On the Adversarial Robustness of Camera-based 3D Object Detection","type":"publication"},{"authors":["Leilani H. Gilpin","Filip Ilievski"],"categories":null,"content":"","date":1664533800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"8c225d3756c1aa95aa62887ff12af214","permalink":"https://zw615.github.io/talk/us2ts-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/us2ts-2022/","section":"talk","summary":"US2TS KG and XAI tutorial with Filip Ilievski","tags":["tutorial"],"title":"Knowledge-based commonsense reasoning and explainability","type":"talk"},{"authors":["Leilani H. Gilpin","Razvan V. Marinescu"],"categories":null,"content":"","date":1662634800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"58058ca4bee201a9e671a4e280c82ffc","permalink":"https://zw615.github.io/talk/birmingham-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/birmingham-2022/","section":"talk","summary":"Joint School of Computer Science Seminar with Razvan Marinescu","tags":[],"title":"Accountability Layers","type":"talk"},{"authors":["Leilani H. Gilpin","Razvan V. 
Marinescu"],"categories":null,"content":"","date":1662512400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e80af49ed8c6bfb8703ce554df81fe6a","permalink":"https://zw615.github.io/talk/ucl-2022/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/ucl-2022/","section":"talk","summary":"Joint CMIC/WEISS + AI Centre Joint Seminar with Razvan Marinescu","tags":[],"title":"Accountability Layers","type":"talk"},{"authors":["Yutong Bai","**Zeyu Wang**","Junfei Xiao","Chen Wei","Huiyu Wang","Alan Yuille","Yuyin Zhou","Cihang Xie"],"categories":[],"content":" ","date":1661385600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"ec04330db1479a868fe58166884cc43a","permalink":"https://zw615.github.io/publication/dmae/","publishdate":"2022-08-25T00:00:00Z","relpermalink":"/publication/dmae/","section":"publication","summary":" ","tags":[],"title":"Masked Autoencoders Enable Efficient Knowledge Distillers","type":"publication"},{"authors":["**Zeyu Wang**","Yutong Bai","Yuyin Zhou","Cihang Xie"],"categories":[],"content":" ","date":1654560000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"d98cdbd18281c902fb31f26985249c94","permalink":"https://zw615.github.io/publication/robustcnn/","publishdate":"2022-06-07T00:00:00Z","relpermalink":"/publication/robustcnn/","section":"publication","summary":" ","tags":[],"title":"Can CNNs Be More Robust Than Transformers?","type":"publication"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1637229000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e7aa206e92dbe580ceec42d0e54ea83e","permalink":"https://zw615.github.io/talk/cse-200-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cse-200-2021/","section":"talk","summary":"Research overview for CSE 200.","tags":[],"title":"Explaining Errors in Complex Systems","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1636588800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"dc2da9b96a213919c61f2d34547fe17d","permalink":"https://zw615.github.io/talk/jhu-case-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/jhu-case-2021/","section":"talk","summary":"Autonomous systems are prone to errors and failures without knowing why. In critical domains like driving, these autonomous counterparts must be able to recount their actions for safety, liability, and trust. An explanation: a model-dependent reason or justification for the decision of the autonomous agent being assessed, is a key component for post-mortem failure analysis, but also for pre-deployment verification. I will show a monitoring framework that uses a model and commonsense knowledge to detect and explain unreasonable vehicle scenarios, even if it has not seen that error before.\nIn the second part of the talk, I will motivate the explanations as a testing framework for autonomous systems. While it is important to develop realistic tests in simulation, simulation is not always representative of the corner cases in the real world. I will show how to use explanations in a feedback loop. The explanation ensures that the machine has done the right thing or it exploits a stressor to be modified and tested moving forward. 
I will conclude by discussing new challenges at the intersection of XAI and autonomy towards autonomous systems that are explainable by design","tags":[],"title":"Explaining Errors in Autonomous Driving: A Diagnosis Tool and Testing Framework for Robust Decision Making","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1636070400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"4f4f3b14f4bccd348efdeaef00c42076","permalink":"https://zw615.github.io/talk/cogsat-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cogsat-2021/","section":"talk","summary":"","tags":[],"title":"Perception Challenge for Autonomous Vehicles","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1635724800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"edd519cbd3bd3f8b2c25a9c8e54e97a2","permalink":"https://zw615.github.io/talk/recruiting/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/recruiting/","section":"talk","summary":"","tags":[],"title":"I'm Recruiting PhD Students","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1626220800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"962aa0612c86dbb0c43cd6d5f7a615d0","permalink":"https://zw615.github.io/talk/fuzz-ieee-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/fuzz-ieee-2021/","section":"talk","summary":"FUZZ-IEEE invited talk","tags":[],"title":"Anomaly Detection Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1620813000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"94e25239951e30de49c846db6186cc82","permalink":"https://zw615.github.io/talk/nw-xai-2021/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/nw-xai-2021/","section":"talk","summary":"Guest Lecture in CS 496 (AI Perspectives)","tags":[],"title":"Explaining Explanations in AI","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1607385600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"27a61a3e41839c63d17b1a21aa934eeb","permalink":"https://zw615.github.io/talk/neurips-phd/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/neurips-phd/","section":"talk","summary":"In this talk, I present new methodologies for detecting and explaining errors in complex systems. My novel contribution is a system-wide monitoring architecture, which is composed of introspective, overlapping committees of subsystems. Each subsystem is encapsulated in a reasonableness monitor, an adaptable framework that supplements local decisions with commonsense data and reasonableness rules. This framework is dynamic and introspective: it allows each subsystem to defend its decisions in different contexts; to the committees it participates in and to itself. For reconciling system-wide errors, I developed a comprehensive architecture that I call Anomaly Detection through Explanations (ADE). The ADE architecture contributes an explanation synthesizer that produces an argument tree, which in turn can be traced and queried to determine the support of a decision, and to construct counterfactual explanations. 
I have applied this methodology to detect incorrect labels in semi-autonomous vehicle data, and to reconcile inconsistencies in simulated, anomalous driving scenarios.","tags":[],"title":"Identifying Multimodal Errors Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1591833600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"8db5f11e3817dfe3673ecbbfaa458ab5","permalink":"https://zw615.github.io/talk/defense/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/defense/","section":"talk","summary":"","tags":[],"title":"Anomaly Detection Through Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1588636800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"038805dac385c300618bb2e706da8f64","permalink":"https://zw615.github.io/talk/cs520-xai/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/cs520-xai/","section":"talk","summary":"","tags":[],"title":"Explaining Explanations","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1583452800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"1c471eed23dce10746c42a55986adbb1","permalink":"https://zw615.github.io/talk/wids-2020/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/wids-2020/","section":"talk","summary":"","tags":[],"title":"Explanation-based Anomaly Detection","type":"talk"},{"authors":null,"categories":["recruiting"],"content":"I am seeking creative, hard-working, PhD students (and master\u0026rsquo;s students and undergraduates) to join the XAI group at UCSC.\nCome work with me on making complex mechanisms more understandable for debugging and diagnosis, accountability, and liability. We examine applications in autonomous driving, deep learning, and other mission-critical or safety-critical applications. Additionally, my work also examines the role of explanations for trustworthy ML, human-machine teams, AI and ethics, and AI safety and fairness.\nUCSC undergrad UCSC undergraduates are welcome to contact me about getting involved in research. Students interested in joining the group should have basic programming experience, and preferably, have taken CSE 140.\nUCSC Master\u0026rsquo;s students Current UCSC Master\u0026rsquo;s students interested in gaining research experience or looking for a supervisor for a research project may contact me.\nIn your introductory email, please include a CV/resume, and indicate a few topics or application domains of interest. I\u0026rsquo;ve listed a couple ongoing projects on the UCSC CSE master\u0026rsquo;s board.\nProspective PhD students I\u0026rsquo;m currently looking to build the XAI group with driven, hard-working PhD students. Students will work on various projects with a focus on autonomous systems, robust machine learning, and/or explainability and interpretability.\nIn your introductory email, please include a CV/resume, and indicate a few topics or application domains of interest. (For inspiration, some ongoing projects are listed here, but feel free to define your own!).\nBefore contacting Prospective PhD students, please review the UC Santa Cruz CSE PhD admissions requirements and ensure that you meet these requirements before contacting Prof. Gilpin. (Note: the GRE is no longer required!) 
I can only take on students that are accepted to a PhD program at UCSC.\nI\u0026rsquo;m unable to provide insights about admissions decisions. The admission decisions are made by a committee.\nDiversity statement Explanatory AI is an interdisciplinary field which benefits from the perspectives and contributions of those from diverse voices (diversity broadly defined). We encourage anyone who shares our interests, including members of historically underrepresented and marginalized groups, women, LGBTQ+ folks, and people with disabilities (with diversity broadly defined), to consider joining us at UC Santa Cruz!\n","date":1583325625,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"9622d7b23a4b35bfd66b060883eaf544","permalink":"https://zw615.github.io/post/recruiting/","publishdate":"2020-03-04T13:40:25+01:00","relpermalink":"/post/recruiting/","section":"post","summary":"I am seeking creative, hard-working, PhD students (and master\u0026rsquo;s students and undergraduates) to join the XAI group at UCSC.\nCome work with me on making complex mechanisms more understandable for debugging and diagnosis, accountability, and liability. We examine applications in autonomous driving, deep learning, and other mission-critical or safety-critical applications. Additionally, my work also examines the role of explanations for trustworthy ML, human-machine teams, AI and ethics, and AI safety and fairness.","tags":null,"title":"I'm Recruiting PhD students","type":"post"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1577836800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"10ebfafa4090e00b7332dc365e125b6e","permalink":"https://zw615.github.io/talk/csail-student-profile/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/csail-student-profile/","section":"talk","summary":"","tags":[],"title":"CSAIL Student Profile","type":"talk"},{"authors":["Yinda Xu*","**Zeyu Wang***","Zuoxin Li","Ye Yuan","Gang Yu"],"categories":[],"content":" ","date":1573689600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1705214261,"objectID":"166e891aab34c7e0a565b1b8347c20be","permalink":"https://zw615.github.io/publication/siamfc++/","publishdate":"2019-11-14T00:00:00Z","relpermalink":"/publication/siamfc++/","section":"publication","summary":" ","tags":[],"title":"Siamfc++: Towards robust and accurate visual tracking with target estimation guidelines","type":"publication"},{"authors":[],"categories":null,"content":"","date":1571835600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"60e8404d0ccc821b7a2d3328470d8e32","permalink":"https://zw615.github.io/talk/ucsd-datascience-2019/","publishdate":"2020-02-22T14:54:47-05:00","relpermalink":"/talk/ucsd-datascience-2019/","section":"talk","summary":"How to use explanations to make more trusted, accountable (and even ethical) decisions.","tags":[],"title":"Using Explanations for Robust Autonomous Decision Making","type":"talk"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"30eec81ebf82bde10e6d5e440a8e3a72","permalink":"https://zw615.github.io/project/ai-ethics/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/ai-ethics/","section":"project","summary":"The AI and ethics reading group is a student-led, campus-wide initiative.","tags":[],"title":"AI and 
ethics","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"d70469283eb79839d3f81926cdef348c","permalink":"https://zw615.github.io/project/explanatory-games/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/explanatory-games/","section":"project","summary":"Using internal symbolic, explanatory representations to robustly monitor agents.","tags":[],"title":"Explanatory Games","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"f2a7de495cb71f5501384c1c6276ddd2","permalink":"https://zw615.github.io/project/reasonableness-monitors/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/reasonableness-monitors/","section":"project","summary":"An adaptable framework to supplement decision making systems with commonsense knowledge and reasonableness rules.","tags":[],"title":"Monitoring Decision Systems","type":"project"},{"authors":null,"categories":null,"content":"","date":1554043848,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"c31658ef40e70de1eda68c21309afca4","permalink":"https://zw615.github.io/project/car-can-explain/","publishdate":"2019-03-31T10:50:48-04:00","relpermalink":"/project/car-can-explain/","section":"project","summary":"The methdologies and underlying technologies to allow self-driving cars and other AI-driven systems to explain behaviors and failures.","tags":[],"title":"The Car Can Explain!","type":"project"},{"authors":["Andrew Sinclair","Jeremiah Foster","Leilani H. Gilpin","Daniel Patnaik","moderated by Eben Moglen","and Mishi Choudhary"],"categories":null,"content":"","date":1541116800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"2b46c85d348a8d22d4f0462f29a89961","permalink":"https://zw615.github.io/talk/sflc-panel/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/sflc-panel/","section":"talk","summary":"","tags":["panel","autonomous driving"],"title":"Automotive FOSS Panel","type":"talk"},{"authors":["Leilani H. Gilpin"],"categories":null,"content":"","date":1523577600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669190780,"objectID":"e8a5cf051692900d834f371b373e5132","permalink":"https://zw615.github.io/talk/sflc-2018/","publishdate":"2020-02-22T07:48:37-05:00","relpermalink":"/talk/sflc-2018/","section":"talk","summary":"","tags":[],"title":"Self-Explanation and Self-Driving","type":"talk"}]
\ No newline at end of file