from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from transformers import TextClassificationPipeline
import torch

# Load the model and tokenizer.
# NOTE(review): "distilbert-base-uncased" is a *base* checkpoint — its
# sequence-classification head is randomly initialized, so the scores below
# are not meaningful until the model is fine-tuned. Use a fine-tuned
# checkpoint (e.g. via pipeline('sentiment-analysis')) for real predictions.
model_name = "distilbert-base-uncased"
tokenizer = DistilBertTokenizer.from_pretrained(model_name)
model = DistilBertForSequenceClassification.from_pretrained(model_name)

# Build the pipeline.
# `return_all_scores=True` is deprecated; `top_k=None` is the current way
# to get the score for every label.
classifier = TextClassificationPipeline(
    model=model, tokenizer=tokenizer, top_k=None, framework="pt"
)

# Example texts
texts = ["I love using Hugging Face models.", "This is a terrible mistake."]

# Run classification
results = classifier(texts)

# Print results (the original loop bodies had lost their indentation,
# which is a SyntaxError — restored here).
for text, result in zip(texts, results):
    print(f"Text: {text}")
    for label in result:
        print(f" {label['label']}: {label['score']:.4f}")
# A more fine-tuned model (좀 더 학습된 모델)
from transformers import pipeline

# Set up a sentiment-analysis pipeline.
# With no model argument, this downloads the default fine-tuned English
# sentiment model, so the scores are actually meaningful here.
classifier = pipeline('sentiment-analysis')

# Example texts
texts = ["I love using Hugging Face models.", "This is a terrible mistake."]

# Run classification
results = classifier(texts)

# Print results (the original loop body had lost its indentation,
# which is a SyntaxError — restored here).
for text, result in zip(texts, results):
    print(f"Text: {text}")
    print(f" Label: {result['label']}, Score: {result['score']:.4f}")