<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Large scale training; Transformer; Protein | Pascal Notin</title>
    <link>https://www.pascalnotin.com/tag/large-scale-training-transformer-protein/</link>
    <atom:link href="https://www.pascalnotin.com/tag/large-scale-training-transformer-protein/index.xml" rel="self" type="application/rss+xml"/>
    <description>Large scale training; Transformer; Protein</description>
    <generator>Wowchemy (https://wowchemy.com)</generator>
    <language>en-us</language>
    <copyright>© 2021, Pascal Notin</copyright>
    <lastBuildDate>Wed, 11 May 2022 00:00:00 +0000</lastBuildDate>
    <image>
      <url>https://www.pascalnotin.com/media/logo_hu5202dac0a2755c9a62d3a907d75f2b8b_108322_300x300_fit_lanczos_3.png</url>
      <title>Large scale training; Transformer; Protein</title>
      <link>https://www.pascalnotin.com/tag/large-scale-training-transformer-protein/</link>
    </image>
    <item>
      <title>RITA: a Study on Scaling Up Generative Protein Sequence Models</title>
      <link>https://www.pascalnotin.com/publication/rita/</link>
      <pubDate>Wed, 11 May 2022 00:00:00 +0000</pubDate>
      <guid>https://www.pascalnotin.com/publication/rita/</guid>
      <description/>
    </item>
  </channel>
</rss>