#!/usr/bin/env python3
"""Simple test for local HuggingFace models"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

def test_local_model():
    print("πŸ§ͺ Testing Local HuggingFace Model...")
    
    # Check device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"πŸ–₯️  Using device: {device}")
    
    # Load model
    model_name = "microsoft/DialoGPT-small"
    print(f"πŸ“¦ Loading {model_name}...")
    
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        model.to(device)  # move the weights to the selected device so it is actually used
        model.eval()      # inference mode (disables dropout)
        
        # DialoGPT's GPT-2-style tokenizer ships without a pad token; reuse EOS
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        
        print("βœ… Model loaded successfully!")
        
        # Test generation
        text = "Hello, how are you?"
        inputs = tokenizer.encode(text, return_tensors="pt").to(device)  # keep inputs on the model's device
        
        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_new_tokens=50,   # cap the length of the generated reply
                do_sample=True,      # sample rather than greedy-decode
                temperature=0.7,     # soften the next-token distribution
                pad_token_id=tokenizer.eos_token_id,
            )
        
        # Decode only the newly generated tokens, skipping the echoed prompt
        response = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
        print(f"πŸ€– Model response: {response}")
        print("βœ… Local HuggingFace model is working!")
        
        return True
        
    except Exception as e:
        print(f"❌ Error: {e}")
        return False

if __name__ == "__main__":
    success = test_local_model()
    if success:
        print("\nπŸŽ‰ You can now run the main test with local HuggingFace models!")
    else:
        print("\n❌ Setup incomplete. Check the error messages above.")