#https://numpy.org/doc/stable/user/absolute_beginners.html
#https://docs.python.org/3/tutorial/introduction.html#lists
from sklearn.datasets import fetch_olivetti_faces
# Fetch (or load from cache) the Olivetti faces dataset. With
# return_X_y=True this yields `data` as flattened 64x64 grayscale faces
# (400 rows x 4096 pixel columns, per the prints below) and `targets`
# as the subject labels. Pixel values arrive scaled to [0, 1] (see the
# loader note further down in this file).
data, targets = fetch_olivetti_faces(return_X_y=True)
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import GradientBoostingRegressor
import sklearn
from sklearn.tree import export_text
from sklearn import tree


from matplotlib import image
from matplotlib import pyplot

# load image as pixel array
from PIL import Image
import numpy
# Load the local test face as a grayscale ('L' mode) pixel array.
img = Image.open('face-64.png').convert('L')
imgarr = numpy.array(img)

print(imgarr.shape)
# Scale 8-bit pixel values from [0, 255] down to [0, 1] to match the
# Olivetti data the model is trained on.
imgarrf = imgarr / 255

# A second test face, loaded/converted/scaled the same way.
img2 = numpy.array(Image.open('face2.jpg').convert('L')) / 255
secondladytop = img2.flatten()[:2048]

# Split the flattened 4096-pixel face at the midpoint (4096 / 2 = 2048):
# the first 2048 values are the top 32 rows, the rest the bottom 32 rows.
tophalflady = imgarrf.flatten()[:2048]
bottomhalflady = imgarrf.flatten()[2048:]

# Sanity-print the dataset dimensions.
print(len(data))     # 400 faces
print(len(data[0]))  # 4096 pixels per face (64 x 64 flattened)
n_pixels = data.shape[1]
print(n_pixels)      # expected: 4096

# Split every face at the middle column index. `//` is floor division;
# (n_pixels + 1) // 2 is the ceiling, so for an even pixel count both
# boundaries land on the same midpoint (2048) and the halves tile the
# face exactly.
upper_end = (n_pixels + 1) // 2
lower_start = n_pixels // 2

# Upper halves are the model inputs, lower halves the targets.
X_train = data[:, :upper_end]
y_target = data[:, lower_start:]

# Build a stacking ensemble that predicts the lower half of a face from
# its upper half.
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.ensemble import StackingRegressor

from sklearn.multioutput import MultiOutputRegressor

# Base models feeding the stack (knn/svm variants kept for experimenting).
level0 = list()
#level0.append(('knn', KNeighborsRegressor()))
level0.append(('cart', DecisionTreeRegressor()))
#level0.append(('svm', SVR()))
# Meta-learner that combines the base-model outputs.
level1 = LinearRegression()
# StackingRegressor handles a single output; wrap it in
# MultiOutputRegressor so all 2048 lower-half pixels are predicted.
model = MultiOutputRegressor( StackingRegressor(estimators=level0, final_estimator=level1, cv=None) )
# Fit on upper halves -> lower halves of all 400 faces.
model.fit(X_train, y_target)
# Predict the lower half for the one local test face. BUG FIX: the
# original called model.predict(data), feeding 4096-feature rows to a
# model fitted on 2048 features, which makes scikit-learn raise a
# feature-count ValueError. Predict from the flattened top half instead,
# reshaped to the (1, 2048) row that predict() expects.
arrpredict = model.predict(tophalflady.reshape(1, -1))



# Reconstruct and display the face: known top half stacked above the
# predicted bottom half.

# Promote the flattened (2048,) top half to a (1, 2048) row so it can
# be stacked with the model prediction along axis 0.
tophalflady = numpy.expand_dims(tophalflady, axis=0)

# Row 0 = real top half, row 1 = predicted bottom half.
topbottom = numpy.concatenate((tophalflady, arrpredict), axis=0)

# Note: the Olivetti loader quantizes to 256 grey levels stored as
# unsigned 8-bit ints, then converts them to floats on [0, 1] — a range
# imshow renders directly.
from numpy import asarray
# Reshape the 4096 stacked values back into a 64x64 image and show it.
pyplot.imshow(topbottom.reshape((64, 64)))
pyplot.show()



