Text Entailment Using RoBERTa

Here is a minimal implementation of text entailment (natural language inference) with a pre-trained RoBERTa model. Note that the plain roberta-base checkpoint only provides encoder representations and has no trained classification head, so for entailment we load a RoBERTa checkpoint fine-tuned on MNLI (roberta-large-mnli) through AutoModelForSequenceClassification.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# A RoBERTa checkpoint fine-tuned on MNLI; plain "roberta-base" has no
# trained classification head, so its argmax would be meaningless here.
tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")
model.eval()

def analyze(pr, hy):  # pr is the premise, hy is the hypothesis
    # Encode the premise/hypothesis pair as a single input sequence.
    inputs = tokenizer(pr, hy, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # For this checkpoint the classes are contradiction, neutral, entailment;
    # model.config.id2label maps the predicted index to its label name.
    label_id = logits.argmax(dim=-1).item()
    return model.config.id2label[label_id].lower()
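As a quick sanity check, a hypothetical premise/hypothesis pair (the sentences below are made up purely for illustration) might be classified like this:

premise = "A man is playing a guitar on stage."
hypothesis = "Someone is performing music."
print(analyze(premise, hypothesis))  # expected: "entailment"
print(analyze(premise, "Nobody is playing any instrument."))  # expected: "contradiction"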