Browse Source
- pulls more of the travis-ci s3 deploy into the repo so we can potentially migrate to another CI system - fixed the sizes output and added verbose compiler version to cmake (#6322) - fixed filenames for firmware uploaded to s3 (was broken by the changes yesterday) - fixed some broken git version display in cmake - Makefile organization - simplified .travis.yml - added a print to know which config the nuttx patch was being applied to - docker_run.sh now respects PX4_DOCKER_REPO for setting the docker image, but defaults to the good production nuttx images
8 changed files with 166 additions and 224 deletions
@ -1,15 +1,28 @@
@@ -1,15 +1,28 @@
|
||||
#! /bin/bash
# Run a command inside the PX4 development docker container.
# The image can be overridden via PX4_DOCKER_REPO; it defaults to the
# production NuttX build image.
#
# Usage: docker_run.sh "<command>"

if [ -z "$PX4_DOCKER_REPO" ]; then
	PX4_DOCKER_REPO=px4io/px4-dev-nuttx:2017-01-14
fi

# Resolve the repository root relative to this script's location.
# NOTE(review): the original assigned to PWD, shadowing the shell's
# working-directory variable; use a dedicated name instead.
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
SRC_DIR=${SCRIPT_DIR}/../

# Share the host ccache so container builds stay warm across runs.
CCACHE_DIR=${HOME}/.ccache
mkdir -p "${CCACHE_DIR}"

# X11 socket, mounted read-only so GUI tools (e.g. simulators) can display.
X11_TMP=/tmp/.X11-unix

docker run -it --rm -w "${SRC_DIR}" \
	-e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
	-e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
	-e CCACHE_DIR=${CCACHE_DIR} \
	-e CI=${CI} \
	-e DISPLAY=${DISPLAY} \
	-e GIT_SUBMODULES_ARE_EVIL=1 \
	-e LOCAL_USER_ID=`id -u` \
	-e TRAVIS_BRANCH=${TRAVIS_BRANCH} \
	-e TRAVIS_BUILD_ID=${TRAVIS_BUILD_ID} \
	-v ${CCACHE_DIR}:${CCACHE_DIR}:rw \
	-v ${SRC_DIR}:${SRC_DIR}:rw \
	-v ${X11_TMP}:${X11_TMP}:ro \
	${PX4_DOCKER_REPO} /bin/bash -c "$@"
@ -1,44 +0,0 @@
@@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env python
"""Stage built firmware from Packages/*.zip for S3 deployment.

Copies Firmware.zip into the archive directory, then extracts every
``.px4`` firmware binary (plus the px4fmu-v4_default parameter XML used
by QGroundControl) from each zip under ``Packages/`` into the branch
directory, flattening away any internal paths.
"""

import glob
import os
import re
import shutil
import zipfile

# Destination directories expected by the S3 deploy step.
S3_DIR = 's3deploy-branch'
S3_ARCHIVE_DIR = 's3deploy-archive'


def extract_file_only(archive, filename, dest):
    """Extract *filename* from *archive* into *dest*, dropping its path.

    The data is written in binary mode so the content is preserved
    byte-for-byte on both Python 2 and 3. The original relied on a
    module-global ``archive`` and text-mode writes; both are fixed here.
    """
    data = archive.read(filename)
    with open(os.path.join(dest, os.path.basename(filename)), 'wb') as f_dst:
        f_dst.write(data)


def main():
    """Create the deploy directories and pull out interesting files."""
    if not os.path.isdir(S3_DIR):
        os.mkdir(S3_DIR)

    if not os.path.isdir(S3_ARCHIVE_DIR):
        os.mkdir(S3_ARCHIVE_DIR)

    shutil.copy("Firmware.zip", S3_ARCHIVE_DIR)

    # get all zip files in Packages directory
    for zip_filename in glob.glob("Packages/*.zip"):
        with zipfile.ZipFile(zip_filename, 'r') as archive:
            # look for interesting names
            for filename in archive.namelist():
                # extract firmware files
                if os.path.splitext(filename)[1] == '.px4':
                    extract_file_only(archive, filename, S3_DIR)

                # copy px4fmu-v4_default xml files for qgroundcontrol
                # BUG FIX: the original called re.match(filename, pattern)
                # with the arguments reversed, so the XML was never matched.
                if re.match(r'.*px4fmu-v4_default.*\.xml', filename) is not None:
                    extract_file_only(archive, filename, S3_DIR)


if __name__ == "__main__":
    main()
@ -0,0 +1,21 @@
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
# Upload a single firmware file to the configured S3 bucket, stripping
# the "nuttx-" prefix from the uploaded object name.
#
# Usage: s3_upload.sh <file>

filename=${1}

# Requires these ENV variables
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# AWS_S3_BUCKET

[ -z "$AWS_ACCESS_KEY_ID" ] && { echo "ERROR: Need to set AWS_ACCESS_KEY_ID"; exit 1; }
[ -z "$AWS_SECRET_ACCESS_KEY" ] && { echo "ERROR: Need to set AWS_SECRET_ACCESS_KEY"; exit 1; }
[ -z "$AWS_S3_BUCKET" ] && { echo "ERROR: Need to set AWS_S3_BUCKET"; exit 1; }

# NOTE(review): the scraped source showed "$(unknown)" here and in the
# put command, and referenced an undefined ${file} in the error message;
# all three clearly meant ${filename}.
if [ -f "${filename}" ]; then
	base_file_name=$(basename "${filename}")
	# Strip the "nuttx-" prefix so the uploaded name is stable.
	short_file_name=${base_file_name#nuttx-}
	s3cmd --access_key=${AWS_ACCESS_KEY_ID} --secret_key=${AWS_SECRET_ACCESS_KEY} put "${filename}" s3://${AWS_S3_BUCKET}/${short_file_name}
else
	echo "ERROR: ${filename} doesn't exist"
	exit 1
fi
Loading…
Reference in new issue