LSTM Input shape?

I’m confused about how LSTM input sizes work.

I have a scenario where I'm trying to predict a person's weight from their weight and height history, both as time series.

I can’t seem to figure out where I’m going wrong in terms of dimensions:

import numpy as np
import pandas as pd
from keras.layers import Dense, LSTM, Input, concatenate
from keras.models import Model

def create_dataset(dataset, window_length=1):
	dataX, dataY = [], []
	for i in range(len(dataset) - window_length - 1):
		# input: a window of window_length consecutive values
		dataX.append(dataset[i:(i + window_length)])
		# label: the single value right after the window
		dataY.append(dataset[i + window_length])
	return np.array(dataX), np.array(dataY)
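
# For what it's worth, my understanding of the shapes: with window_length=60
# on the 90 training rows below, dataX should come out as (29, 60, 1) and
# dataY as (29, 1), since range(90 - 60 - 1) gives 29 windows.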

def buildModel(dataLength, labelLength):

	weight = Input(shape=(dataLength, 1), name="weight")
	height = Input(shape=(dataLength, 1), name="height")

	weightLayers = LSTM(64, return_sequences=False)(weight)
	heightLayers = LSTM(64, return_sequences=False)(height)

	output = concatenate([ weightLayers, heightLayers ])

	output = Dense(labelLength, activation="linear", name="weightedAverage_output")(output)

	model = Model(
		inputs=[weight, height],
		outputs=[output]
		)

	model.compile(optimizer="rmsprop", loss="mse")

	return model
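
# So the model takes two inputs of shape (batch, dataLength, 1) and returns
# a single output of shape (batch, labelLength).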

bogus = {
	"weight": range(100,200),
	"height": range(150,250)
}

dataset = pd.DataFrame(bogus)

train_size = int(len(dataset) * 0.90)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[-test_size:]
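# 100 rows in total, so train gets 90 rows and test gets 10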

# print("*" * 30)
# print(train.head())
# print(train.tail())
# print("==> {}".format(len(train)))
# print("*" * 30)
# print(test.head())
# print(test.tail())
# print("==> {}".format(len(test)))
# input(">")

height_train = train["height"].values.reshape(-1, 1).astype("float32")
weight_train = train["weight"].values.reshape(-1, 1).astype("float32")

height_test = test["height"].values.reshape(-1, 1).astype("float32")
weight_test = test["weight"].values.reshape(-1, 1).astype("float32")
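# each series is now a column vector: the train arrays are (90, 1),
# the test arrays (10, 1)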

x_train_height, y_train_height = create_dataset(height_train, 60)
x_train_weight, y_train_weight = create_dataset(weight_train, 60)

x_test_height, y_test_height = create_dataset(height_test, 60)
x_test_weight, y_test_weight = create_dataset(weight_test, 60)
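# x_train_* should now be (29, 60, 1) and y_train_* (29, 1). The test split
# only has 10 rows, so range(10 - 60 - 1) is empty and the x_test_*/y_test_*
# arrays end up with zero samples -- I suspect that's a problem too.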

model = buildModel(60, 4)
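# labelLength=4 here, so the final Dense layer expects targets of shape (None, 4)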
model.fit(
	[x_train_weight, x_train_height],
	[y_train_weight],
	validation_data=(
		[x_test_weight, x_test_height],
		[y_test_weight],
	),
	epochs=1,
	batch_size=3000,
)

I get this error:

ValueError: Error when checking target: expected weightedAverage_output to have shape (None, 4) but got array with shape (29, 1)
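
Adding two prints just before the model.fit call above (my own sanity check; the numbers match the error) shows the mismatch:

print(x_train_weight.shape)  # (29, 60, 1)
print(y_train_weight.shape)  # (29, 1), but Dense(labelLength=4) expects (None, 4)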

I'm pretty sure I'm going wrong somewhere with the input and output dimensions.

Any ideas?