I am a DPhil student supervised by Yee Whye Teh and Yarin Gal. My research interests span Bayesian deep learning, variational inference, and reinforcement learning. I am particularly interested in uncertainty quantification in deep learning, reinforcement learning as probabilistic inference, and probabilistic transfer learning. I obtained a master’s degree in statistics from the University of Oxford and an undergraduate degree in mathematics and economics from Yale University. I am an AI Fellow at Georgetown University’s Center for Security and Emerging Technology, a Fellow of the German Academic Scholarship Foundation, and a Rhodes Scholar.
Publications
2022
T. G. J. Rudner, F. Bickford Smith, Q. Feng, Y. W. Teh, Y. Gal, Continual learning via sequential function-space variational inference, International Conference on Machine Learning, 2022.
@inproceedings{rudner2022continual,
author = {Rudner, Tim G. J. and Bickford Smith, Freddie and Feng, Qixuan and Teh, Yee Whye and Gal, Yarin},
year = {2022},
title = {Continual learning via sequential function-space variational inference},
booktitle = {International Conference on Machine Learning}
}
2021
T. G. J. Rudner, C. Lu, M. A. Osborne, Y. Gal, Y. W. Teh, On Pathologies in KL-Regularized Reinforcement Learning from Expert Demonstrations, ICLR 2021 RobustML Workshop, 2021.
@article{rudner2021pathologies,
title = {On Pathologies in KL-Regularized Reinforcement Learning from Expert Demonstrations},
author = {Rudner, Tim G. J. and Lu, Cong and Osborne, Michael A. and Gal, Yarin and Teh, Yee Whye},
year = {2021},
journal = {ICLR 2021 RobustML Workshop}
}
2020
T. G. J. Rudner, D. Sejdinovic, Y. Gal, Inter-domain Deep Gaussian Processes, in International Conference on Machine Learning (ICML), 2020, PMLR 119:8286–8294.
Inter-domain Gaussian processes (GPs) allow for high flexibility and low computational cost when performing approximate inference in GP models. They are particularly suitable for modeling data exhibiting global function behavior but are limited to stationary covariance functions and thus fail to model non-stationary data effectively. We propose Inter-domain Deep Gaussian Processes with RKHS Fourier Features, an extension of shallow inter-domain GPs that combines the advantages of inter-domain and deep Gaussian processes (DGPs), and demonstrate how to leverage existing approximate inference approaches to perform simple and scalable approximate inference in Inter-domain Deep Gaussian Processes. We assess the performance of our method on a wide range of prediction problems and demonstrate that it outperforms inter-domain GPs and DGPs on challenging large-scale and high-dimensional real-world datasets exhibiting both global behavior and a high degree of non-stationarity.
@inproceedings{RudSejGal2020,
author = {Rudner, T.G.J. and Sejdinovic, D. and Gal, Y.},
title = {{{Inter-domain Deep Gaussian Processes}}},
booktitle = {International Conference on Machine Learning (ICML)},
pages = {PMLR 119:8286--8294},
year = {2020}
}
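The following is a minimal NumPy sketch (not from the paper) of the sparse variational GP predictive equations that inter-domain methods build on. It uses point-evaluation inducing variables, the simplest special case; inter-domain approaches such as the RKHS Fourier features above instead define the inducing variables as projections u_m = ∫ φ_m(x) f(x) dx and replace Kuu and Kuf with the covariances of those projected variables. All function and variable names below are illustrative.

import numpy as np

def rbf_kernel(A, B, lengthscale=1.0, variance=1.0):
    # Squared-exponential kernel matrix between the rows of A and B.
    sq_dists = np.sum(A**2, 1)[:, None] + np.sum(B**2, 1)[None, :] - 2.0 * A @ B.T
    return variance * np.exp(-0.5 * sq_dists / lengthscale**2)

def sparse_gp_predict(X_test, Z, q_mu, q_sqrt, kernel=rbf_kernel):
    # Predictive mean and marginal variance of a sparse variational GP with
    # variational posterior q(u) = N(q_mu, q_sqrt q_sqrt^T) over M inducing
    # variables. Z are inducing inputs (point-evaluation case); an
    # inter-domain variant would instead supply Kuu and Kuf computed from
    # projected inducing variables.
    Kuu = kernel(Z, Z) + 1e-6 * np.eye(len(Z))   # M x M, with jitter for stability
    Kuf = kernel(Z, X_test)                      # M x N
    kff = np.diag(kernel(X_test, X_test))        # N marginal prior variances

    L = np.linalg.cholesky(Kuu)
    A = np.linalg.solve(L, Kuf)                  # L^{-1} Kuf
    mean = A.T @ np.linalg.solve(L, q_mu)        # Kfu Kuu^{-1} q_mu
    B = np.linalg.solve(L.T, A)                  # Kuu^{-1} Kuf
    # kff - diag(Kfu Kuu^{-1} Kuf) + diag(Kfu Kuu^{-1} S Kuu^{-1} Kuf), S = q_sqrt q_sqrt^T
    var = kff - np.sum(A**2, axis=0) + np.sum((q_sqrt.T @ B)**2, axis=0)
    return mean, var

# Toy usage with a hand-picked variational posterior.
rng = np.random.default_rng(0)
Z = np.linspace(-3.0, 3.0, 10)[:, None]
q_mu = rng.normal(size=10)
q_sqrt = 0.1 * np.eye(10)
mean, var = sparse_gp_predict(np.linspace(-4.0, 4.0, 50)[:, None], Z, q_mu, q_sqrt)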
2019
M. Fellows, A. Mahajan, T. G. J. Rudner, S. Whiteson, VIREL: A Variational Inference Framework for Reinforcement Learning, in Advances in Neural Information Processing Systems 32, 2019.
@inproceedings{Fellows:etal:2019c,
author = {Fellows, Matthew and Mahajan, Anuj and Rudner, Tim G. J. and Whiteson, Shimon},
title = {{VIREL}: {A} {V}ariational {I}nference {F}ramework for {R}einforcement {L}earning},
booktitle = {Advances in Neural Information Processing Systems 32},
year = {2019}
}
T. G. J. Rudner, M. Rußwurm, J. Fil, R. Pelich, B. Bischke, V. Kopackova, P. Bilinski, Multi³Net: Segmenting Flooded Buildings via Fusion of Multiresolution, Multisensor, and Multitemporal Satellite Imagery, in Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence, 2019.
@inproceedings{Rudner:etal:2019b,
author = {Rudner, Tim G. J. and Rußwurm, Marc and Fil, Jakub and Pelich, Ramona and Bischke, Benjamin and Kopackova, Veronika and Bilinski, Piotr},
title = {{M}ulti³{N}et: {S}egmenting {F}looded {B}uildings via {F}usion of {M}ultiresolution, {M}ultisensor, and {M}ultitemporal {S}atellite {I}magery},
booktitle = {Proceedings of the Thirty-Third {AAAI} Conference on Artificial Intelligence},
year = {2019}
}
M. Samvelyan, T. Rashid, C. Schroeder de Witt, G. Farquhar, N. Nardelli, T. G. J. Rudner, C. Hung, P. H. S. Torr, J. Foerster, S. Whiteson, The StarCraft Multi-Agent Challenge, in Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, 2019.
@inproceedings{Samvelyan:etal:2019a,
author = {Samvelyan, Mikayel and Rashid, Tabish and Schroeder de Witt, Christian and Farquhar, Gregory and Nardelli, Nantas and Rudner, Tim G. J. and Hung, Chia-Man and Torr, Philip H. S. and Foerster, Jakob and Whiteson, Shimon},
title = {{T}he {S}tar{C}raft {M}ulti-{A}gent {C}hallenge},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
year = {2019}
}
2018
T. G. J. Rudner, V. Fortuin, Y. W. Teh, Y. Gal, On the Connection between Neural Processes and Approximate Gaussian Processes, NeurIPS 2018 Workshop on Bayesian Deep Learning, 2018.
@article{Rudner:etal:2018,
author = {Rudner, Tim G. J. and Fortuin, Vincent and Teh, Yee Whye and Gal, Yarin},
title = {{O}n the {C}onnection between {N}eural {P}rocesses and {A}pproximate {G}aussian {P}rocesses},
journal = {NeurIPS 2018 Workshop on Bayesian Deep Learning},
year = {2018}
}
2017
T. G. J. Rudner, D. Sejdinovic, Inter-domain Deep Gaussian Processes, NeurIPS 2017 Workshop on Bayesian Deep Learning, 2017.
@article{Rudner:Sejdinovic:2017,
author = {Rudner, Tim G. J. and Sejdinovic, Dino},
title = {{I}nter-domain {D}eep {G}aussian {P}rocesses},
journal = {NeurIPS 2017 Workshop on Bayesian Deep Learning},
year = {2017}
}