import numpy as np
# Initialize the thresholds
lower_threshold = 0.8
upper_threshold = 1.2
# Learning rate
learning_rate = 0.1
# Training data (XOR problem)
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
targets = [0, 1, 1, 0]
# Training loop with a maximum of 1000 iterations
max_iterations = 1000
epoch = 0
network_trained = False
start_weights = None
final_weights = None
all_epoch_outputs = [] # Store outputs of all epochs for debugging and transparency
while epoch < max_iterations:
    epoch += 1
    all_correct = True  # Flag to check whether all outputs are correct
    current_weights = np.random.rand(2)  # Random starting weights (re-drawn at the start of every epoch)

    if epoch == 1:  # The first iteration after initialization
        start_weights = current_weights  # Save the starting weights

    epoch_outputs = []  # To store outputs of this epoch

    for input_vector, target in zip(inputs, targets):
        # Compute the weighted sum
        weighted_sum = np.dot(input_vector, current_weights)

        # Activation function (simple threshold band)
        output = 1 if lower_threshold < weighted_sum < upper_threshold else 0

        # Error calculation
        error = target - output

        # If there is an error, adjust the weights
        if error != 0:
            all_correct = False
            current_weights += learning_rate * error * np.array(input_vector)

        epoch_outputs.append((input_vector, output, target))  # Save each iteration's output

    all_epoch_outputs.append(epoch_outputs)

    # Check whether all outputs are correct
    if all_correct:
        network_trained = True
        final_weights = current_weights  # Save the final weights
        break  # Stop once all outputs are correct

    # If XOR has not been learned after another 100 iterations, draw new random starting weights
    if epoch % 100 == 0:  # 100 instead of 20
        print(f"Non-working starting weights: {start_weights}")
        start_weights = np.random.rand(2)  # Draw new starting weights
if network_trained:
    print(f"The network learned XOR correctly after {epoch} iterations.")
    print(f"The working starting weights were: {start_weights}")
    print(f"The final weights are: {final_weights}")
else:
    print(f"The network did not learn XOR correctly after {epoch} iterations.")
    final_weights = current_weights  # Fall back to the last weights so the test below can still run

# Test the network after the learning iterations
print("\nFinal Test Output:")
for input_vector, target in zip(inputs, targets):
    weighted_sum = np.dot(input_vector, final_weights)
    output = 1 if lower_threshold < weighted_sum < upper_threshold else 0
    print(f"Input: {input_vector}, Target: {target}, Output: {output}")

# Optionally, print out the outputs of each epoch for transparency
print("\nEpoch Outputs:")
for epoch_index, epoch_outputs in enumerate(all_epoch_outputs):
    print(f"Epoch {epoch_index + 1}:")
    for input_vector, output, target in epoch_outputs:
        print(f"  Input: {input_vector}, Output: {output}, Target: {target}")
The network learned XOR correctly after 3 iterations.
The working starting weights were: [0.70419481 0.99219951]
The final weights are: [0.80546083 0.87434976]
Final Test Output:
Input: [0, 0], Target: 0, Output: 0
Input: [0, 1], Target: 1, Output: 1
Input: [1, 0], Target: 1, Output: 1
Input: [1, 1], Target: 0, Output: 0
Epoch Outputs:
Epoch 1:
Input: [0, 0], Output: 0, Target: 0
Input: [0, 1], Output: 1, Target: 1
Input: [1, 0], Output: 0, Target: 1
Input: [1, 1], Output: 0, Target: 0
Epoch 2:
Input: [0, 0], Output: 0, Target: 0
Input: [0, 1], Output: 0, Target: 1
Input: [1, 0], Output: 1, Target: 1
Input: [1, 1], Output: 0, Target: 0
Epoch 3:
Input: [0, 0], Output: 0, Target: 0
Input: [0, 1], Output: 1, Target: 1
Input: [1, 0], Output: 1, Target: 1
Input: [1, 1], Output: 0, Target: 0
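As a quick sanity check on the run above, the printed final weights can be plugged back into the same band-threshold activation by hand. This is a minimal sketch using the weight values copied from the output above; a fresh run will print different numbers because the starting weights are drawn at random.

import numpy as np

# Re-check of the reported result: evaluate the band-threshold activation
# with the final weights copied from the run output (not fixed constants).
lower_threshold, upper_threshold = 0.8, 1.2
final_weights = np.array([0.80546083, 0.87434976])

for input_vector, target in zip([[0, 0], [0, 1], [1, 0], [1, 1]], [0, 1, 1, 0]):
    weighted_sum = float(np.dot(input_vector, final_weights))  # 0.0, 0.874, 0.805, 1.680
    output = 1 if lower_threshold < weighted_sum < upper_threshold else 0
    print(input_vector, round(weighted_sum, 3), output, target)

Only the two mixed inputs [0, 1] and [1, 0] produce weighted sums inside the open interval (0.8, 1.2), while [0, 0] falls below and [1, 1] above it, which is exactly why this band activation can reproduce XOR with just two weights and no hidden layer.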