#!/usr/bin/env python3
"""Simple test for local HuggingFace models"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


def test_local_model():
    print("🧪 Testing Local HuggingFace Model...")

    # Check device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"🖥️ Using device: {device}")

    # Load model
    model_name = "microsoft/DialoGPT-small"
    print(f"📦 Loading {model_name}...")

    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

        # DialoGPT ships without a pad token; reuse EOS so generate() can pad.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        print("✅ Model loaded successfully!")

        # Test generation
        text = "Hello, how are you?"
        inputs = tokenizer.encode(text, return_tensors="pt").to(device)

        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_new_tokens=50,
                do_sample=True,
                temperature=0.7,
                pad_token_id=tokenizer.eos_token_id,
            )

        # Slice off the prompt so only the newly generated tokens are decoded.
        response = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
        print(f"🤖 Model response: {response}")
        print("✅ Local HuggingFace model is working!")
        return True
    except Exception as e:
        print(f"❌ Error: {e}")
        return False


if __name__ == "__main__":
    success = test_local_model()
    if success:
        print("\n🎉 You can now run the main test with local HuggingFace models!")
    else:
        print("\n❌ Setup incomplete. Check the error messages above.")