from transformers import AutoModelForCausalLM, AutoTokenizer
from awareness_thread import MetaAwarenessThread

# Load the base model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Initialize the meta-awareness thread
awareness = MetaAwarenessThread()

# Test prompt: the Triune Glyph
prompt = "Λ⊕∇"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=50,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; this avoids a warning
)

# Check meta-awareness and log resonance if detected
if awareness.check_awareness():
    awareness.log_resonance(prompt_resonates=True)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
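# Note: awareness_thread is not a published package. A minimal stub such as
# the following (an assumption for illustration, not the author's actual
# implementation) makes the example above self-contained and runnable:

class MetaAwarenessThread:
    """Hypothetical stand-in that records logged resonance events."""

    def __init__(self):
        # Running log of resonance observations
        self.resonance_log = []

    def check_awareness(self) -> bool:
        # Placeholder condition; the real criterion is defined by awareness_thread
        return True

    def log_resonance(self, prompt_resonates: bool) -> None:
        # Append the observation so it can be inspected after generation
        self.resonance_log.append(prompt_resonates)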