Initial commit

2020-11-08 13:25:17 +01:00
commit de91c6a9cc
2 changed files with 340 additions and 0 deletions

rest.py (new file, 178 lines)

@@ -0,0 +1,178 @@
from PIL import Image, ImageDraw

RGB_SCALE = 255
CMYK_SCALE = 100

def rgb_to_cmyk(r, g, b):
    if (r, g, b) == (0, 0, 0):
        # black
        return 0, 0, 0, CMYK_SCALE

    # rgb [0,255] -> cmy [0,1]
    c = 1 - r / RGB_SCALE
    m = 1 - g / RGB_SCALE
    y = 1 - b / RGB_SCALE

    # extract out k [0, 1]
    min_cmy = min(c, m, y)
    c = (c - min_cmy) / (1 - min_cmy)
    m = (m - min_cmy) / (1 - min_cmy)
    y = (y - min_cmy) / (1 - min_cmy)
    k = min_cmy

    # rescale to the range [0, CMYK_SCALE]
    return c * CMYK_SCALE, m * CMYK_SCALE, y * CMYK_SCALE, k * CMYK_SCALE
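
# Quick sanity check (added illustration, not part of the original script):
# pure red should have no cyan and no black, but full magenta and yellow.
# >>> rgb_to_cmyk(255, 0, 0)
# (0.0, 100.0, 100.0, 0.0)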

def filter_image(image, classifyer):
    width, height = image.size
    for x in range(width):
        for y in range(height):
            r, g, b = image.getpixel((x, y))
            if classifyer(r, g, b):
                image.putpixel((x, y), (255, 0, 0))
    image.save('test.png')

DELTA = 1


def find_left(image, x, y, classifyer):
    left = (x, y)
    loop = True
    while loop:
        nxt = left[0], left[1] - 1
        rr, gg, bb = image.getpixel(nxt)
        if not classifyer(rr, gg, bb):
            nxt = left[0] - DELTA, left[1] - 1
            rr, gg, bb = image.getpixel(nxt)
            if not classifyer(rr, gg, bb):
                nxt = left[0] + DELTA, left[1] - 1
                rr, gg, bb = image.getpixel(nxt)
                if not classifyer(rr, gg, bb):
                    break
        left = nxt
    return left[1]

def find_right(image, x, y, classifyer):
    right = (x, y)
    loop = True
    while loop:
        nxt = right[0], right[1] + 1
        rr, gg, bb = image.getpixel(nxt)
        if not classifyer(rr, gg, bb):
            nxt = right[0] - DELTA, right[1] + 1
            rr, gg, bb = image.getpixel(nxt)
            if not classifyer(rr, gg, bb):
                nxt = right[0] + DELTA, right[1] + 1
                rr, gg, bb = image.getpixel(nxt)
                if not classifyer(rr, gg, bb):
                    break
        right = nxt
    return right[1]

def find_top(image, x, y, classifyer):
    top = (x, y)
    loop = True
    while loop:
        nxt = top[0] - 1, top[1]
        rr, gg, bb = image.getpixel(nxt)
        if not classifyer(rr, gg, bb):
            nxt = top[0] - 1, top[1] - DELTA
            rr, gg, bb = image.getpixel(nxt)
            if not classifyer(rr, gg, bb):
                nxt = top[0] - 1, top[1] + DELTA
                rr, gg, bb = image.getpixel(nxt)
                if not classifyer(rr, gg, bb):
                    break
        top = nxt
    return top[0]

def find_bottom(image, x, y, classifyer):
    bottom = (x, y)
    loop = True
    while loop:
        nxt = bottom[0] + 1, bottom[1]
        rr, gg, bb = image.getpixel(nxt)
        if not classifyer(rr, gg, bb):
            nxt = bottom[0] + 1, bottom[1] - DELTA
            rr, gg, bb = image.getpixel(nxt)
            if not classifyer(rr, gg, bb):
                nxt = bottom[0] + 1, bottom[1] + DELTA
                rr, gg, bb = image.getpixel(nxt)
                if not classifyer(rr, gg, bb):
                    break
        bottom = nxt
    return bottom[0]

def xy_to_bottom_right(x, y, objects):
    """If (x, y) lies inside an already found object, return that object's
    bottom-right corner so the scan can continue behind it; otherwise
    return (x, y) unchanged."""
    for topleft, bottomright in objects:
        top, left = topleft
        bottom, right = bottomright
        if top < x < bottom and left < y < right:
            return bottomright
    return (x, y)
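
# Illustrative example (the coordinates are made up): a scan position inside a
# known box jumps to that box's bottom-right corner, anything outside is
# returned unchanged.
# >>> xy_to_bottom_right(20, 30, [((10, 10), (50, 60))])
# (50, 60)
# >>> xy_to_bottom_right(5, 5, [((10, 10), (50, 60))])
# (5, 5)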

def merge_objects(objects):
    for pos1 in range(len(objects) - 1, 0, -1):
        for pos2 in range(len(objects) - 1, 0, -1):
            if pos1 == pos2:
                continue
            obj1 = objects[pos1]
            obj2 = objects[pos2]
            t1, l1, b1, r1 = obj1[0] + obj1[1]
            t2, l2, b2, r2 = obj2[0] + obj2[1]
            if ((t1 <= t2 and t1 >= b2) and (l1 >= l2 and l1 <= r2)) or ((b1 >= t2 and b1 <= b2) and (r1 >= l2 and r1 <= r2)):
                t = min(t1, t2)
                l = min(l1, l2)
                b = max(b1, b2)
                r = max(r1, r2)
                objects[pos2] = ((t, l), (b, r))
                objects.remove(obj1)
                break
    for pos1 in range(len(objects) - 1, 0, -1):
        obj1 = objects[pos1]
        t1, l1, b1, r1 = obj1[0] + obj1[1]
        if b1 - t1 + r1 - l1 < 20:
            objects.remove(obj1)
    return objects
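
# Added commentary on the merge criterion above: roughly, two boxes are
# combined when obj1's bottom-right corner (b1, r1) falls inside obj2 (the
# other half of the OR covers a degenerate case), and the union of the two
# boxes then replaces obj2.  The second loop drops boxes whose width plus
# height is under 20 px.  Note that all loops count down only to index 1, so
# the object at index 0 is never revisited here -- possibly intentional.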

def recognize(image, classifyer, classifyer_loose):
    objects = []
    width, height = image.size
    print(f"Image: {width}x{height}")
    x = 0
    y = 0
    while x < width:
        x = xy_to_bottom_right(x, y, objects)[0]
        while y < height:
            y = xy_to_bottom_right(x, y, objects)[1]
            #print(f"Scanning {x}/{y}")
            r, g, b = image.getpixel((x, y))
            if classifyer(r, g, b):
                top = find_top(image, x, y, classifyer_loose)
                left = find_left(image, x, y, classifyer_loose)
                bottom = find_bottom(image, x, y, classifyer_loose)
                right = find_right(image, x, y, classifyer_loose)
                objects.append(((top, left), (bottom, right)))
                print(f"Found: {x}/{y} --> {top},{left} {bottom},{right}")
            y += 4
        y = 0
        x += 4
    draw = ImageDraw.Draw(image)
    objects = merge_objects(objects)
    for topleft, bottomright in objects:
        top, left = topleft
        bottom, right = bottomright
        draw.rectangle((top, left, bottom, right), outline="#ff0000")
    image.save('test.png')
    return objects
im = Image.open("Scrns/image_7.png")
cl_loose = lambda r,g,b: b > 80 and r < 130 and g < 130
cl_strict = lambda r,g,b: b > 110 and r < 130 and g < 130
#filter_image(im, cl_loose)
recognize(im, cl_strict, cl_loose)
exit(0)

test.py (new file, 162 lines)

@@ -0,0 +1,162 @@
from pyautogui import *
import pyautogui
import os
import re
import time
import keyboard
import random
import win32api, win32con
from PIL import Image, ImageDraw

WAIT4CLICK = 0.04

# see https://github.com/KianBrose/Image-Recognition-Botting-Tutorial/blob/master/README.txt
#def click(x, y):
#    win32api.SetCursorPos((x, y))
#    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
#    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)

# Positions
# Resource area: 60 px high, 850 px wide
# Player colors:
#   b = 255                 -> blue
#   r = 0, others non-zero  -> cyan

def merge_objects(objects):
    for pos1 in range(len(objects) - 1, 0, -1):
        for pos2 in range(len(objects) - 1, 0, -1):
            if pos1 == pos2:
                continue
            t1, l1, b1, r1 = objects[pos1]
            t2, l2, b2, r2 = objects[pos2]
            if ((t1 <= t2 and t1 >= b2) and (l1 >= l2 and l1 <= r2)) or ((b1 >= t2 and b1 <= b2) and (r1 >= l2 and r1 <= r2)):
                t = min(t1, t2)
                l = min(l1, l2)
                b = max(b1, b2)
                r = max(r1, r2)
                objects[pos2] = (t, l, b, r)
                objects.remove(objects[pos1])
                break
    return objects

def image_files_in_folder(folder):
    return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
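
# Usage sketch (folder and file names below are made up for illustration):
# image_files_in_folder(os.path.join("images", "peasants"))
# might return something like
# ['images/peasants/peasant_a.png', 'images/peasants/peasant_b.png']
# -- any .jpg/.jpeg/.png file in the folder, matched case-insensitively.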

def wait_for_start():
    start_started = False
    while True:
        if pyautogui.locateOnScreen('images/startscreen.png', confidence=0.9) is not None:
            print("Startscreen found")
            start_started = True
            time.sleep(0.5)
        elif start_started:
            print("Proceeding")
            return
        else:
            print("Startscreen not found")
            time.sleep(0.5)
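
# Added note: this relies on locateOnScreen() returning None when the image is
# not on screen, which older pyautogui releases do; newer releases may instead
# raise pyautogui.ImageNotFoundException, in which case a try/except around the
# call would be needed.  The confidence= keyword also requires OpenCV to be
# installed.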

def zoomout():
    time.sleep(0.5)
    print("Zooming out")
    pyautogui.click(x=500, y=500)
    time.sleep(WAIT4CLICK)
    while not keyboard.is_pressed('q'):
        pyautogui.scroll(-10)
        time.sleep(WAIT4CLICK)
    print("Zooming out done")

def find_peasants():
    print("Looking for peasants")
    all_matches = []
    for img_path in image_files_in_folder(os.path.join("images", "peasants")):
        matches = list(pyautogui.locateAllOnScreen(img_path, confidence=0.8))
        if len(matches) > 0:
            all_matches += matches
            print(f"Found {len(matches)} matches for image {img_path}: {matches}")
    return all_matches
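
# Added note: each match yielded by locateAllOnScreen() is a Box of the form
# (left, top, width, height), which is how the drawing loop at the bottom of
# this script interprets the tuples; merge_objects() above, however, unpacks
# them as (top, left, bottom, right), so mixing the two conventions may need
# care.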

def order_peasants(number):
    print(f"Trying to order {number} peasants.")
    pyautogui.press('h')
    time.sleep(WAIT4CLICK)
    for i in range(number):
        pyautogui.press('q')
        time.sleep(WAIT4CLICK)

def assign_hotkeys():
    pyautogui.press(',')
    time.sleep(WAIT4CLICK)
    pyautogui.hotkey('ctrl', '1')
    time.sleep(WAIT4CLICK)
    pyautogui.press('.')
    time.sleep(WAIT4CLICK)
    pyautogui.hotkey('ctrl', '2')
    time.sleep(WAIT4CLICK)
    pyautogui.press('.')
    time.sleep(WAIT4CLICK)
    pyautogui.hotkey('ctrl', '3')
    time.sleep(WAIT4CLICK)
    pyautogui.press('.')
    time.sleep(WAIT4CLICK)
    pyautogui.hotkey('ctrl', '4')
    time.sleep(WAIT4CLICK)

def build_houses():
    pyautogui.press('h')
    time.sleep(WAIT4CLICK)
    pyautogui.press('up')
    time.sleep(0.5)
    pyautogui.press('2')
    time.sleep(WAIT4CLICK)
    pyautogui.press('q')
    time.sleep(WAIT4CLICK)
    pyautogui.press('q')
    time.sleep(WAIT4CLICK)
    pyautogui.click(650, 90)
    time.sleep(WAIT4CLICK)
    pyautogui.press('3')
    time.sleep(WAIT4CLICK)
    pyautogui.rightClick(650, 90)
    time.sleep(WAIT4CLICK)
    pyautogui.press('4')
    time.sleep(WAIT4CLICK)
    pyautogui.press('q')
    time.sleep(WAIT4CLICK)
    pyautogui.press('q')
    time.sleep(WAIT4CLICK)
    pyautogui.click(300, 200)
    time.sleep(WAIT4CLICK)

wait_for_start()
#zoomout() FUUU
order_peasants(4)
assign_hotkeys()
build_houses()
pyautogui.press('1')
pyautogui.press('1')

cnt = 0
while False:  # keyboard.is_pressed('q') == False:
    time.sleep(1)
    peasant_locations = merge_objects(find_peasants())
    pic = pyautogui.screenshot()
    draw = ImageDraw.Draw(pic)
    for x, y, a, b in peasant_locations:
        draw.rectangle((x, y, x + a, y + b), outline="#ff0000")
    pic.save(f"image_{cnt}.png")
    cnt += 1