commit 016dbd08144540afbc0aa474aa822ea081c75439 Author: Marc Plano-Lesay Date: Fri Dec 12 15:30:04 2025 +1100 Migrate from Bazel diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..87174b6 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/public/ diff --git a/.mise.toml b/.mise.toml new file mode 100644 index 0000000..3d5962b --- /dev/null +++ b/.mise.toml @@ -0,0 +1,2 @@ +[tools] +zola = "0.21.0" diff --git a/config.toml b/config.toml new file mode 100644 index 0000000..5073465 --- /dev/null +++ b/config.toml @@ -0,0 +1,41 @@ +base_url = "https://enoent.fr" +title = "enoent.fr" +description = "Random musings of a software engineer." +author = "Marc Plano-Lesay" +theme = "retro-future" +compile_sass = true +build_search_index = false +generate_feeds = true +default_language = "en" + +taxonomies = [ + {name = "tags", feed = true}, + {name = "series", feed = false} +] + +[markdown] +highlight_code = true +highlight_theme = "css" +render_emoji = false +extra_syntaxes_and_themes = [] +highlight_line_numbers = true + +[extra] +mermaid = true +bio = "Software engineer at Canva, building things with code and occasionally writing about it." + +nav_links = [ + { name = "About", url = "/about" } +] + +socials = [ + { name = "github", url = "https://github.com/Kernald", icon = "github" }, + { name = "mastodon", url = "https://androiddev.social/@kernald", icon = "mastodon" }, + { name = "twitter", url = "https://x.com/marcpl31", icon = "twitter" }, + { name = "forgejo", url = "https://git.enoent.fr/kernald/", icon = "forgejo" } +] + +[extra.analytics] +service = "plausible" +id = "enoent.fr" +self_hosted_url = "https://analytics.enoent.fr" diff --git a/content/_index.md b/content/_index.md new file mode 100644 index 0000000..84357dd --- /dev/null +++ b/content/_index.md @@ -0,0 +1,4 @@ ++++ +title = "Blog" +redirect_to = "/posts/" ++++ diff --git a/content/about/_index.md b/content/about/_index.md new file mode 100644 index 0000000..7615813 --- /dev/null +++ b/content/about/_index.md @@ -0,0 +1,16 @@ ++++ +title = "About" ++++ + +# Hi, I'm Marc + +I'm a software engineer currently working at Canva. I build software – mostly mobile and backend stuff involving Android, Kotlin, and various infrastructure things. I have a weird fascination with build systems. + +## Why this blog exists + +Mostly as a digital garden for things I figure out and don't want to figure out again in six months. If it helps someone else avoid the same rabbit holes, even better. + +## Tech I work with + +Android, Kotlin, Nix, Bazel, Kubernetes, and whatever else seems like a good idea at the time. + diff --git a/content/posts/_index.md b/content/posts/_index.md new file mode 100644 index 0000000..43a9a99 --- /dev/null +++ b/content/posts/_index.md @@ -0,0 +1,6 @@ ++++ +title = "Blog" +sort_by = "date" +paginate_by = 5 +template = "index.html" ++++ diff --git a/content/posts/android-display-a-dialog-from-an-appwidget.md b/content/posts/android-display-a-dialog-from-an-appwidget.md new file mode 100644 index 0000000..3f36448 --- /dev/null +++ b/content/posts/android-display-a-dialog-from-an-appwidget.md @@ -0,0 +1,184 @@ ++++ +template = "article.html" +title = "Android: display a Dialog from an AppWidget" +date = 2014-04-06T22:27:19+02:00 +description = "A workaround for displaying dialogs from Android AppWidgets, which normally can't show dialogs due to context limitations." 
+
+[taxonomies]
+tags = ["android"]
++++
+
+## Issue
+
+When you want to display a dialog, you don't only need a context, you need an
+activity context. From an activity, displaying a dialog is pretty
+straightforward:
+
+*Display a dialog from an activity*
+
+```java
+new AlertDialog.Builder(MyActivity.this)
+    .setTitle("Dialog title")
+    .setMessage("Dialog message")
+    .setPositiveButton(android.R.string.yes, new DialogInterface.OnClickListener() {
+        public void onClick(DialogInterface dialog, int which) {
+            // Handle a positive answer
+        }
+    })
+    .setNegativeButton(android.R.string.no, new DialogInterface.OnClickListener() {
+        public void onClick(DialogInterface dialog, int which) {
+            // Handle a negative answer
+        }
+    })
+    .setIcon(R.drawable.ic_dialog_alert)
+    .show();
+```
+
+Okay, that's a pretty usual code sample. But what about displaying it from an
+app-widget?
+
+## Display a Dialog from an AppWidget
+
+What is needed to display a Dialog? An Activity. So let's open an Activity,
+which will open the Dialog. When you update your AppWidget, via a
+`RemoteViews`:
+
+*Open an activity from an AppWidget*
+
+```java
+Intent intent = new Intent(getApplicationContext(), MyActivity.class);
+
+// Old activities shouldn't be in the history stack
+intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_CLEAR_TASK);
+
+PendingIntent pendingIntent = PendingIntent.getActivity(getApplicationContext(),
+        0,
+        intent,
+        PendingIntent.FLAG_UPDATE_CURRENT);
+
+// Link the PendingIntent to a Button
+rv.setOnClickPendingIntent(R.id.btn_dialog, pendingIntent);
+```
+
+When the button `btn_dialog` is pressed, the activity `MyActivity` is launched.
+If we put the `AlertDialog.Builder` code from the first sample in
+`MyActivity.onCreate()`, we have a dialog displayed from an app-widget. But
+there's an issue: we don't want the activity to be visible.
+
+## Hide the proxy activity
+
+The activity must be displayed. But what about making it fully transparent?
+That's an easy, two-step task. First, in the manifest, remove any decoration
+and hide this activity from history:
+
+*Remove Activity decorations*
+
+```xml
+<activity
+    android:name=".MyActivity"
+    android:theme="@android:style/Theme.Translucent.NoTitleBar"
+    android:noHistory="true" />
+```
+
+Then in the activity itself, set a transparent background:
+
+*Set a transparent background*
+
+```java
+public class MyActivity extends Activity {
+    public void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+        getWindow().setBackgroundDrawable(new ColorDrawable(0));
+
+        // Dialog creation goes here
+    }
+}
+```
+
+At this point, there are two issues. First, if the dialog is displayed from the
+app-widget, it will appear in the recent apps list. That's probably not wanted.
+There's again a simple solution. In the manifest:
+
+*Hide the Activity from recent apps*
+
+```xml
+<activity
+    android:name=".MyActivity"
+    android:theme="@android:style/Theme.Translucent.NoTitleBar"
+    android:noHistory="true"
+    android:excludeFromRecents="true" />
+```
+
+The second issue is more visible. The theme `Theme.Translucent.NoTitleBar`
+refers to a pre-ICS theme, hence the Gingerbread-looking dialog.
+
+{{ img(src="/images/articles/android-display-a-dialog-from-an-appwidget/1-dialog-gb-theme.png", caption="Default theme") }}
+
+To use the Holo theme on 3.0+ devices, the dialog construction code has to be
+tweaked a little:
+
+*Applying a theme to the dialog*
+
+```java
+Context context;
+// For a custom theme:
+context = new ContextThemeWrapper(MyActivity.this, R.style.dialog);
+
+// For the Holo one on 3.0+ devices, fallback on 1.x/2.x devices:
+if (android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
+    context = new ContextThemeWrapper(MyActivity.this, android.R.style.Theme_Holo);
+} else {
+    context = new ContextThemeWrapper(MyActivity.this, android.R.style.Theme_Dialog);
+}
+
+new AlertDialog.Builder(context)
+    .setTitle("Dialog title")
+    .setMessage("Dialog message")
+    .setPositiveButton(android.R.string.yes, new DialogInterface.OnClickListener() {
+        public void onClick(DialogInterface dialog, int which) {
+            // Handle a positive answer
+        }
+    })
+    .setNegativeButton(android.R.string.no, new DialogInterface.OnClickListener() {
+        public void onClick(DialogInterface dialog, int which) {
+            // Handle a negative answer
+        }
+    })
+    .setIcon(R.drawable.ic_dialog_alert)
+    .show();
+```
+
+{{ img(src="/images/articles/android-display-a-dialog-from-an-appwidget/2-dialog-holo-theme.png", caption="Holo theme") }}
+
+If multiple dialogs are needed, the activity could be reused by adding
+parameters to the intent, and displaying the needed dialog accordingly. You can
+also call this activity like any other from your other activities, and share
+the dialog creation code. Here's an example of a generic dialog activity,
+called from a button on another activity:
+
+{{ img(src="/images/articles/android-display-a-dialog-from-an-appwidget/3-in-app-shared-dialog.png", caption="Dialog on top of a basic activity") }}
+
+Last point: even if the activity is invisible, it still needs to be closed when
+the dialog is hidden. Don't forget to call `Activity.finish()` when the dialogs
+are dismissed. Starting with API 17, you can use a
+`DialogInterface.OnDismissListener`:
+
+*Finishing the activity*
+
+```java
+new AlertDialog.Builder(context)
+    .setOnDismissListener(new DialogInterface.OnDismissListener() {
+        @Override
+        public void onDismiss(DialogInterface dialogInterface) {
+            finish();
+        }
+    })
+    // …
+    .show();
+```
+
+You can find the full sample code on
+[GitHub](https://github.com/Kernald/android-dialog-activity-sample).
diff --git a/content/posts/android-things-first-look.md b/content/posts/android-things-first-look.md
new file mode 100644
index 0000000..e26c562
--- /dev/null
+++ b/content/posts/android-things-first-look.md
@@ -0,0 +1,436 @@
++++
+template = "article.html"
+title = "Android Things: first look"
+date = 2017-01-06T10:55:08+01:00
+description = "An introduction to Android Things, Google's IoT platform that brings the Android ecosystem to embedded devices."
+
+[taxonomies]
+tags = ["android", "iot"]
++++
+
+## What is Android Things?
+
+Android Things is an alternative Android version, announced at Google I/O 2015,
+and released as a first developer preview in December 2016. Its purpose is to
+support developing embedded IoT devices on top of a known and widely documented
+Android ecosystem.
+
+It's currently running on three different boards: the Intel Edison, the NXP Pico
+i.MX6UL, and the Raspberry Pi 3. Some higher-end boards are coming soon.
+
+On the SDK side, Android Things comes with a specific support library to ease
+low-level hardware usage.
+It consists of two parts: the Peripheral I/O API, which supports GPIO, PWM,
+I2C, SPI and UART, and the User Driver API, which allows a developer to write
+a hardware-specific, high-level driver, to ease hardware reusability by
+injecting events into the Android framework. Other applications can in turn
+use those events without having to interact with the hardware directly.
+
+There's a downside: the bundled Android is not as complete as the one you can
+find on a phone. Most of the standard applications aren't installed (Calendar,
+Phone…), and standard content providers are absent too (MediaProvider,
+Dictionary…).
+
+Android Things supports displays, with the default Android UI toolkit. However,
+the display is a bit different from what you're used to seeing on an Android
+device: there's no notification bar, navigation bar or anything, the running
+application will use the full display. That is, if it uses it at all: displays
+are purely optional.
+
+## Installing Android Things
+
+Installation depends on the device you're targeting. Up-to-date,
+device-specific instructions are available in [the official
+documentation](https://developer.android.com/things/hardware/developer-kits.html).
+
+Note that there is no emulator available (yet?); you'll need to install Android
+Things on a real board.
+
+The next steps of this post assume your local adb is connected to your device
+(that is, `adb devices` is listing it as attached).
+
+## Creating a new application
+
+Android Things uses the same Activity, Service… lifecycles you're used to
+seeing in any Android application. As such, creating an Android Things project
+is really close to creating an Android one:
+
+- create a blank project on Android Studio (selecting a form factor is
+  mandatory, and Android Studio doesn't support Things yet, so keep Phone
+  and Tablet selected), without any activities
+- in the `build.gradle`, remove all the dependencies and add the Things support
+  library:
+
+  ```groovy
+  apply plugin: 'com.android.application'
+
+  android {
+      compileSdkVersion 24
+      buildToolsVersion "25.0.2"
+      defaultConfig {
+          applicationId "fr.enoent.mything"
+          minSdkVersion 24
+          targetSdkVersion 24
+          versionCode 1
+          versionName "1.0"
+      }
+      buildTypes {
+          release {
+              minifyEnabled false
+              proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
+          }
+      }
+  }
+
+  dependencies {
+      provided 'com.google.android.things:androidthings:0.1-devpreview'
+  }
+  ```
+
+- add a reference to the Things support library in the `AndroidManifest.xml`:
+
+  ```xml
+  <application ...>
+      <uses-library android:name="com.google.android.things" />
+      ...
+  </application>
+  ```
+
+- the last step is to create an Activity: create a new blank, without layout,
+  non-support Activity from Android Studio. You'll also need to add a
+  Things-specific intent-filter to this Activity in the Manifest, so it will
+  start automatically on boot. Keep the Launcher intent-filter to make it
+  easier to start this Activity from Android Studio:
+
+  ```xml
+  <activity android:name=".MainActivity">
+      <!-- Launch this activity from Android Studio -->
+      <intent-filter>
+          <action android:name="android.intent.action.MAIN" />
+          <category android:name="android.intent.category.LAUNCHER" />
+      </intent-filter>
+
+      <!-- Launch this activity automatically on boot -->
+      <intent-filter>
+          <action android:name="android.intent.action.MAIN" />
+          <category android:name="android.intent.category.IOT_LAUNCHER" />
+          <category android:name="android.intent.category.DEFAULT" />
+      </intent-filter>
+  </activity>
+  ```
+
+Now you have an Activity, which is supposed to start automatically on boot.
+Let's check that by adding a log in the `onCreate` method:
+
+*MainActivity.java*
+
+```java
+public class MainActivity extends Activity {
+    @Override
+    protected void onCreate(@Nullable Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+
+        Log.d("MainActivity", "onCreate");
+    }
+}
+```
+
+As you can see, it's perfectly standard Android code.
+
+You can run it from Android Studio, and you should see the `onCreate` mention
+in the logs.
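+
+If you'd rather watch the logs from a terminal, `adb logcat` can filter on the
+tag used in the sample above (`MainActivity` here):
+
+```
+adb logcat -s MainActivity
+```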
+ +## Lights, action! + +I won't cover Android UI in this post, as it's something really standard you can +already find all over the web. Let's do a much more fun UI: a blinking LED to +indicate the application is running. + +### Connecting the LED + +I only have a Raspberry Pi 3, so I won't be able to cover the other boards for +this part. You can find the Pi pinout details +[here](https://developer.android.com/things/hardware/raspberrypi-io.html). + +The circuit is dead simple: connect the LED's cathode to Pi's ground, a small +resistor in series with your LED's anode (Pi's GPIOs are 3v3), and the other +side of the resistor to the Pi's BCM6. You can use any pin labelled as GPIO in +the diagram. + +### Pimp your 'droid + +Time to go back to Android Studio. First step: listing the available GPIOs on +your board. There's a new class in the support library to access the GPIOs: + `PeripheralManagerService`. + +*MainActivity.java* + +```java +@Override +protected void onCreate(@Nullable Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + PeripheralManagerService service = new PeripheralManagerService(); + Log.d("MainActivity", "Available GPIOs: " + service.getGpioList()); +} +``` + +This code will list all the GPIOs we're allowed to use. On a Pi, hopefully +you'll find the BCM6 pin you connected the LED to. Here's the output on a Pi 3: + +``` +Available GPIOs: [BCM12, BCM13, BCM16, BCM17, BCM18, BCM19, BCM20, BCM21, +BCM22, BCM23, BCM24, BCM25, BCM26, BCM27, BCM4, BCM5, BCM6] +``` + +The next step is to initialize this GPIO. We'll use the +`PeripheralManagerService` once more to get a reference to it, then set it up to +`LOW` (0v) by default: + +*MainActivity.java* + +```java +private static final String GPIO_PIN_NAME_LED = "BCM6"; + +@Override +protected void onCreate(@Nullable Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + PeripheralManagerService service = new PeripheralManagerService(); + + try { + Gpio ledGpio = service.openGpio(GPIO_PIN_NAME_LED); + ledGpio.setDirection(Gpio.DIRECTION_OUT_INITIALLY_LOW); + } catch (IOException e) { + Log.e("MainActivity", "Error on PeripheralIO API", e); + } +} +``` + +Now, the only thing left to do is to toggle the GPIO value. This is a single +call: `ledGpio.setValue(!ledGpio.getValue());`. Toggling it every second comes +with a purely Android-oriented solution: a `Handler`, and a delayed +`Runnable`. 
+ +*MainActivity.java* + +```java +public class MainActivity extends Activity { + private static final int INTERVAL_BETWEEN_BLINKS_MS = 1000; + private static final String GPIO_PIN_NAME_LED = "BCM6"; + + private Handler handler = new Handler(); + private Gpio ledGpio; + + private Runnable blinkRunnable = new Runnable() { + @Override + public void run() { + // Exit if the GPIO is already closed + if (ledGpio == null) { + return; + } + + try { + ledGpio.setValue(!ledGpio.getValue()); + handler.postDelayed(blinkRunnable, INTERVAL_BETWEEN_BLINKS_MS); + } catch (IOException e) { + Log.e("MainActivity", "Error on PeripheralIO API", e); + } + } + }; + + @Override + protected void onCreate(@Nullable Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + PeripheralManagerService service = new PeripheralManagerService(); + + try { + ledGpio = service.openGpio(GPIO_PIN_NAME_LED); + ledGpio.setDirection(Gpio.DIRECTION_OUT_INITIALLY_LOW); + + handler.post(blinkRunnable); + } catch (IOException e) { + Log.e("MainActivity", "Error on PeripheralIO API", e); + } + } +} +``` + +## User drivers + +Google already ships some user-drivers, ready to use, in the form of Gradle +libraries. Let's take the Button driver as an example. + +### Installation + +It's a simple Gradle dependency to add: +`compile 'com.google.android.things.contrib:driver-button:0.1'` + +### Usage + +The button driver provides a simple class which you feed with the GPIO name to +use, and a callback: + +*MainActivity.java* + +```java +public class MainActivity extends Activity { + private static final String GPIO_PIN_NAME_BUTTON = "BCM6"; + + Button button; + + @Override + protected void onCreate(@Nullable Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + Log.d("MainActivity", "onCreate"); + + try { + button = new Button(GPIO_PIN_NAME_BUTTON, + Button.LogicState.PRESSED_WHEN_HIGH + ); + button.setOnButtonEventListener(new Button.OnButtonEventListener() { + @Override + public void onButtonEvent(Button button, boolean pressed) { + Log.d("MainActivity", "Button has been pressed!"); + } + }); + } catch (IOException e) { + Log.e("MainActivity", "Unable to configure the button", e); + } + } + + @Override + protected void onDestroy() { + super.onDestroy(); + + try { + button.close(); + } catch (IOException e) { + Log.e("MainActivity", "There's been an error while closing the button"); + } + } +} +``` + +Notice that you'll have to close the GPIO when you leave your Activity +(`button.close()`). + +And with those simple dozen lines of Java, you can use your hardware button +to trigger things in your application. The button driver also provides a way to +bind your hardware button to a software event, then any application can simply +listen to the software key event. 
+This is an example binding the button to the key `A`:
+
+*MainActivity.java*
+
+```java
+public class MainActivity extends Activity {
+    private static final String GPIO_PIN_NAME_BUTTON = "BCM6";
+
+    ButtonInputDriver inputDriver;
+
+    @Override
+    protected void onCreate(@Nullable Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+
+        try {
+            inputDriver = new ButtonInputDriver(GPIO_PIN_NAME_BUTTON,
+                    Button.LogicState.PRESSED_WHEN_HIGH,
+                    KeyEvent.KEYCODE_A // the keycode to send
+            );
+            inputDriver.register();
+        } catch (IOException e) {
+            Log.e("MainActivity", "Error while binding button", e);
+        }
+    }
+
+    @Override
+    protected void onDestroy() {
+        super.onDestroy();
+
+        inputDriver.unregister();
+        try {
+            inputDriver.close();
+        } catch (IOException e) {
+            Log.e("MainActivity", "Error while unregistering the button driver", e);
+        }
+    }
+
+    @Override
+    public boolean onKeyDown(int keyCode, KeyEvent event) {
+        if (keyCode == KeyEvent.KEYCODE_A) {
+            Log.d("MainActivity", "Button has been pressed");
+            return true;
+        }
+        return super.onKeyDown(keyCode, event);
+    }
+}
+```
+
+## Conclusion
+
+Even if it may be early to conclude anything from this preview, I have mixed
+feelings regarding the state of Android Things.
+
+While having the whole UI toolkit available is great for industry-oriented
+hardware, I don't really see the point of it in consumer products. Most
+connected devices use LEDs or a screen with minimal information. From my point
+of view, a connected device should be set up, then forgotten, and work without
+requesting anything from me past the initial configuration. I don't want to
+press a button to turn the lights on. I don't want to take my phone, unlock it,
+start the relevant application, then press a button to turn the lights on. I
+want the lights to turn on when I need them. Using a motion sensor, location
+tracking from my phone, whatever. As long as I have to interact with the
+lights, they are not smarter than my good old light bulbs with their big button
+I can use even in the dark with both hands full.
+
+The first thing I expected from this preview was Weave integration, which is
+completely absent (I guess it will come eventually). You can start making a
+connected device powered by Google technologies, but *you can't use the
+standard Google is trying to push forward to control it*. You'll have to write
+your own control interface (probably a REST API, which means integrating a web
+server in your Android Things application).
+
+Having the ability to work in the IDE I'm used to, with an SDK I already know,
+and being able to reuse the ton of existing Java libraries is really great
+though. It makes the entry barrier much lower than with usual embedded
+development. That is, when you have someone else to do the hardware part for
+you.
+
+I know computing power and storage come nearly free nowadays, but being used to
+AVRs, MSPs…, I can't help thinking a Raspberry Pi 3 is totally overkill for
+that kind of use. I just used a 1.2 GHz, quad-core SoC and 600 MB of storage
+*to blink an LED*. Most of those devices will only be remote-controlled and
+will send data for analysis anyway. An ESP8266 is much smaller, uses less
+power, and comes with built-in Wi-Fi, for a couple of bucks.
+
+In the end, I think Google shouldn't have used Android as a basis for that kind
+of IoT platform. While it surely looks attractive, it comes with multiple
+drawbacks inherent to the idea itself.
+A Google-branded toolchain for something like the ESP8266 with built-in Weave
+support and first-class Firebase/Google Cloud client libraries would have been
+a much better approach.
diff --git a/content/posts/arduino-leonardo-fully-featured-keyboard.md b/content/posts/arduino-leonardo-fully-featured-keyboard.md
new file mode 100644
index 0000000..baf67b1
--- /dev/null
+++ b/content/posts/arduino-leonardo-fully-featured-keyboard.md
@@ -0,0 +1,192 @@
++++
+template = "article.html"
+title = "Arduino Leonardo fully-featured keyboard"
+date = 2014-05-04T23:03:16+02:00
+description = "Building a fully-featured keyboard emulator with Arduino Leonardo, including support for modifier keys and special characters."
+
+[taxonomies]
+tags = ["arduino"]
++++
+
+The Leonardo has a simple [keyboard API](http://arduino.cc/en/Reference/MouseKeyboard).
+I needed a way to emulate a keyboard (from a joystick and arcade buttons - you
+see where I'm going now). Here's how I did it.
+
+## First try
+
+Starting with an [Arduino sample](http://arduino.cc/en/Tutorial/KeyboardAndMouseControl),
+we can make a first attempt. The circuit is the same as the sample's - simply
+adjust the pins to your needs. It won't need any change until the end of this
+post.
+
+_Basic keyboard_
+
+```cpp
+const int upButton = 2;
+const int downButton = 3;
+const int leftButton = 4;
+const int rightButton = 5;
+
+void setup() {
+  pinMode(upButton, INPUT);
+  pinMode(downButton, INPUT);
+  pinMode(leftButton, INPUT);
+  pinMode(rightButton, INPUT);
+
+  Keyboard.begin();
+}
+
+void loop() {
+  if (digitalRead(upButton) == HIGH) {
+    Keyboard.write(KEY_UP_ARROW);
+  }
+  if (digitalRead(downButton) == HIGH) {
+    Keyboard.write(KEY_DOWN_ARROW);
+  }
+  if (digitalRead(leftButton) == HIGH) {
+    Keyboard.write(KEY_LEFT_ARROW);
+  }
+  if (digitalRead(rightButton) == HIGH) {
+    Keyboard.write(KEY_RIGHT_ARROW);
+  }
+}
+```
+
+This however has a major issue. Each `Keyboard.write()` call generates a
+press/release cycle. If you keep a button pushed, instead of a single, long key
+press, the computer will receive a ton of press/release events. We need to keep
+the button states between `loop()` calls.
+
+## Adding memory to the keyboard
+
+Here's a second attempt, with two modifications. First, to ease the
+addition/removal of a button, the code uses arrays instead of doing all steps
+four times. Second thing changed: each button now remembers its state.
+
+_Stateful keyboard_
+
+```cpp
+// Number of buttons to handle
+const int buttonsCount = 4;
+
+// Arduino PINs to use
+const int pins[buttonsCount] = {
+  2,
+  3,
+  4,
+  5
+};
+
+// Keys to send (order has to match the pins array)
+const byte keys[buttonsCount] = {
+  KEY_UP_ARROW,
+  KEY_DOWN_ARROW,
+  KEY_LEFT_ARROW,
+  KEY_RIGHT_ARROW
+};
+
+bool status[buttonsCount] = {LOW};
+
+void setup() {
+  for (int i = 0; i < buttonsCount; ++i) {
+    pinMode(pins[i], INPUT);
+  }
+
+  Keyboard.begin();
+}
+
+void loop() {
+  for (int i = 0; i < buttonsCount; ++i) {
+    const int pinStatus = digitalRead(pins[i]);
+    if (pinStatus != status[i]) {
+      status[i] = pinStatus;
+      if (pinStatus == HIGH) {
+        Keyboard.press(keys[i]);
+      } else {
+        Keyboard.release(keys[i]);
+      }
+    }
+  }
+}
```
+```
+
+So… the keyboard now remembers which buttons are pressed, and should generate
+a single couple of events for each button press/release. _Should_. There's
+still an issue: mechanical buttons are not perfect. Many events are still
+generated.
+This is due to a phenomenon called [bounce](http://en.wikipedia.org/wiki/Switch#Contact_bounce).
+
+## Debouncing the keyboard
+
+A simple way to debounce a button is, well, really simple: ignore all changes
+to the state of the button during a short delay after an initial change. While
+it's not the most precise way and could be problematic in a more complex
+scenario, it's perfectly fine to do this for a keyboard, given we keep this
+delay short enough.
+
+Let's throw in an array to remember the last event acknowledged by the
+keyboard:
+
+_Debounced keyboard_
+
+```cpp
+// Number of buttons to handle
+const int buttonsCount = 4;
+
+// Arduino PINs to use
+const int pins[buttonsCount] = {
+  2,
+  3,
+  4,
+  5
+};
+
+// Keys to send (order has to match the pins array)
+const byte keys[buttonsCount] = {
+  KEY_UP_ARROW,
+  KEY_DOWN_ARROW,
+  KEY_LEFT_ARROW,
+  KEY_RIGHT_ARROW
+};
+
+// Debounce delay
+const long debounceDelay = 50;
+
+bool status[buttonsCount] = {LOW};
+long lastDebounces[buttonsCount] = {0};
+
+void setup() {
+  for (int i = 0; i < buttonsCount; ++i) {
+    pinMode(pins[i], INPUT);
+  }
+
+  Keyboard.begin();
+}
+
+void loop() {
+  for (int i = 0; i < buttonsCount; ++i) {
+    const int pinStatus = digitalRead(pins[i]);
+    if (pinStatus != status[i] && millis() - debounceDelay > lastDebounces[i]) {
+      status[i] = pinStatus;
+      if (pinStatus == HIGH) {
+        Keyboard.press(keys[i]);
+      } else {
+        Keyboard.release(keys[i]);
+      }
+      lastDebounces[i] = millis();
+    }
+  }
+}
+```
+
+You may need to adjust the debounce delay according to your buttons. Try to
+keep it as short as possible.
+
+## Conclusion
+
+And _voilà_! We now have a fully functional keyboard, to which it's easy to
+add/remove/change buttons. There's still room for improvement: it would be easy
+to allow it to send key sequences instead of single key presses, for example.
+
+You can find the full code on [GitHub](https://github.com/Kernald/gameduino).
diff --git a/content/posts/compat-libraries-incompatibilities.md b/content/posts/compat-libraries-incompatibilities.md
new file mode 100644
index 0000000..5039a05
--- /dev/null
+++ b/content/posts/compat-libraries-incompatibilities.md
@@ -0,0 +1,100 @@
++++
+template = "article.html"
+title = "Compat libraries incompatibilities"
+date = 2017-05-06T19:52:26+02:00
+description = "Navigating the challenges of Android compat libraries, particularly with vector drawables and their unexpected limitations."
+
+[taxonomies]
+tags = ["android"]
++++
+
+Compat libraries are great. They allow us to work with the newest Android APIs,
+without thinking (much) about the minimum API level. Instead of thousands
+of devices, you can reach billions. With nearly no changes in your code.
+
+But sometimes, they're not so great…
+
+Support for vector drawables (mainly SVG files) was added in API 21. They
+come in two kinds: still and animated. They're great for many reasons: they
+scale properly, you don't have to keep multiple densities of the same image
+anymore, you can reference colors and dimensions from resources, you can do
+path morphing… well, almost.
+
+In order to use vector drawables on pre-21 APIs, Google released a set of two
+support libraries, `com.android.support:vector-drawable` and
+`com.android.support:animated-vector-drawable`. The first one works starting
+with API 7, the latter with API 11. Everything is explained
+[here](https://android-developers.googleblog.com/2016/02/android-support-library-232.html).
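+
+For reference, opting into the compat implementation is mostly a single flag in
+Gradle, plus using `app:srcCompat` instead of `android:src` in your layouts (a
+minimal sketch - plugin versions and resource names are up to your project):
+
+```groovy
+android {
+    defaultConfig {
+        // Inflate vector drawables through the support library on pre-21 devices
+        vectorDrawables.useSupportLibrary = true
+    }
+}
+```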
+
+Still drawables should work exactly the same regardless of the API level you're
+running on. For the animated version, however, there's a catch: you can't
+animate all the properties you can on 21+. From the
+[documentation](https://developer.android.com/reference/android/support/graphics/drawable/AnimatedVectorDrawableCompat.html):
+
+> Note that the animation in AnimatedVectorDrawableCompat has to be valid and
+> functional based on the SDK version the app will be running on. Before SDK
+> version 21, the animation system didn't support the following features:
+>
+> * Path Morphing (PathType evaluator). This is used for morphing one path into
+>   another.
+> * Path Interpolation. This is used to define a flexible interpolator
+>   (represented as a path) instead of the system defined ones like
+>   LinearInterpolator.
+> * Animating 2 values in one ObjectAnimator according to one path's X
+>   value and Y value. One usage is moving one object in both X and Y
+>   dimensions along a path.
+
+Let's say you want an animation using path morphing. Place your vector drawable
+in the `drawable-v21` folder and add a fallback with a rotation or whatever in
+the `drawable` one, and you're good to go, right? *Right?*
+
+The same page of the documentation also mentions this:
+
+> For API 24 and above, this class is delegating to the framework's
+> AnimatedVectorDrawable. For older API version, this class uses ObjectAnimator
+> and AnimatorSet to animate the properties of a VectorDrawableCompat to create an
+> animated drawable.
+
+And here comes trouble. The reasoning behind this is that the SDK's
+implementation on APIs 21 to 23 [contains some bugs](https://issuetracker.google.com/issues/37116940#comment3).
+However, using an `AnimatedVectorDrawableCompat` on an API 21 device comes with
+the same limitations as running on an API 19 device: you're using the SDK's
+`ObjectAnimator` and `AnimatorSet`, and they do *not* know the support
+libraries' symbols. More specifically, they don't know how to animate any
+`android.support.graphics.drawable.PathParser$PathDataNode` (what they do know
+about is the `android.util.PathParser$PathDataNode` class).
+
+If you're looking for more technical bits, I wrote some more notes on this
+[StackOverflow question](http://stackoverflow.com/q/43654496/775894) as I
+stumbled upon this issue.
+
+As a result, on APIs 21 to 23, any path-morphing animation fails silently when
+using the support libraries. There's a work-around, though. You can use the
+SDK's implementation to load your vector drawable:
+
+```java
+if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
+    // Casting the drawable to an AnimatedVectorDrawable is useless here
+    // It's just to show that you don't get an AnimatedVectorDrawableCompat
+    AnimatedVectorDrawable drawable = (AnimatedVectorDrawable) getDrawable(R.drawable.ic_animated_drawable_32dp);
+    mBinding.imageView.setImageDrawable(drawable);
+} else {
+    mBinding.imageView.setImageResource(R.drawable.ic_animated_drawable_32dp);
+}
+
+// Starting the animation works regardless of the implementation we're using now
+final Drawable animation = mBinding.imageView.getDrawable();
+if (animation instanceof Animatable) {
+    ((Animatable) animation).start();
+}
+```
+
+However, using the SDK's implementation obviously means *not* making use of
+the support library's bug-fixes. It will probably work for simple drawables,
+but may fail on more complex ones.
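+
+To make the limitation concrete, this is the kind of animator it applies to - a
+`pathType` animation morphing one path into another (a minimal, illustrative
+sketch; the paths just morph a chevron pointing up into one pointing down):
+
+```xml
+<!-- res/animator/chevron_morph.xml (illustrative name) -->
+<objectAnimator xmlns:android="http://schemas.android.com/apk/res/android"
+    android:duration="300"
+    android:propertyName="pathData"
+    android:valueFrom="M0,16 L12,4 L24,16"
+    android:valueTo="M0,8 L12,20 L24,8"
+    android:valueType="pathType" />
+```
+
+Both paths need the same number and type of commands for the morph to be
+valid - and on APIs 21 to 23, through the compat loader, this is exactly the
+kind of animation that silently does nothing.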
+
+As a final note: if you're using animated vector drawables, remember to test
+not only on whatever your minimum SDK is and your latest shiny device, but also
+on APIs 21 to 23. You might be surprised.
diff --git a/content/posts/compile-ffmpeg-for-android.md b/content/posts/compile-ffmpeg-for-android.md
new file mode 100644
index 0000000..41a9461
--- /dev/null
+++ b/content/posts/compile-ffmpeg-for-android.md
@@ -0,0 +1,310 @@
++++
+template = "article.html"
+title = "Compile FFmpeg for Android"
+date = 2014-06-20T10:40:00+02:00
+description = "A comprehensive guide to compiling FFmpeg for Android, including building the necessary toolchain and integrating it into your project."
+
+[taxonomies]
+tags = ["android"]
++++
+
+When you have to manipulate audio or video on Android, being used to
+open-source software, there's a single name which comes directly to mind:
+FFmpeg. However, FFmpeg is C software, meant to be used as an executable, and
+it doesn't officially support Android.
+
+There are a lot of partial and/or out-of-date how-tos out there on getting
+FFmpeg running on Android, like
+[halfninja's build](https://github.com/halfninja/android-ffmpeg-x264). However,
+I needed to use FFmpeg's `concat` demuxer, introduced in FFmpeg 1.1. Most
+builds target 0.9. There are
+[a ton](http://stackoverflow.com/search?q=ffmpeg+android)
+of questions on StackOverflow about getting newer
+FFmpeg releases working on Android. So, here's a full explanation to get
+[FFmpeg 2.2.3 "Muybridge"](https://www.ffmpeg.org/releases/ffmpeg-2.2.3.tar.bz2)
+working on Android. I'll describe the steps for Linux, but everything is pretty
+standard shell and should work on any decent OS.
+
+## Prerequisites
+
+First, let's install everything needed.
+
+### Android SDK and NDK
+
+The Android SDK is available [here](http://developer.android.com/sdk/index.html),
+while the NDK is available
+[here](https://developer.android.com/tools/sdk/ndk/index.html). You should also
+set two environment variables (`ANDROID_SDK` and `ANDROID_NDK`) to their
+respective installation paths.
+
+On Archlinux, using the `android-sdk` and `android-ndk` AUR packages:
+
+{{ filename(body="Setting environment variables for Android SDK/NDK") }}
+
+```sh
+export ANDROID_NDK=/opt/android-ndk/
+export ANDROID_SDK=/opt/android-sdk/
+```
+
+### FFmpeg sources
+
+Download the FFmpeg sources
+[here](https://www.ffmpeg.org/releases/ffmpeg-2.2.3.tar.bz2) and extract them in
+`$ANDROID_NDK/sources/ffmpeg-2.2.3`. Building third-party libraries in
+`$ANDROID_NDK/sources` makes them easily available to use in other projects.
+
+## Building FFmpeg
+
+### Configuration
+
+You can tweak the configuration if needed, but here's the one I used:
+
+{{ filename(body="FFmpeg configuration") }}
+
+```sh
+SYSROOT=$ANDROID_NDK/platforms/android-9/arch-arm/
+# You should adjust this path depending on your platform, e.g.
darwin-x86_64 for Mac OS +TOOLCHAIN=$ANDROID_NDK/toolchains/arm-linux-androideabi-4.8/prebuilt/linux-x86_64 +CPU=arm +PREFIX=$(pwd)/android/$CPU + +# Set these if needed +ADDI_CFLAGS="" +ADDI_LDFLAGS="" + +./configure \ + --prefix=$PREFIX \ + --disable-shared \ + --enable-static \ + --disable-doc \ + --disable-ffmpeg \ + --disable-ffplay \ + --disable-ffprobe \ + --disable-ffserver \ + --disable-doc \ + --disable-symver \ + --enable-protocol=concat \ + --enable-protocol=file \ + --enable-muxer=mp4 \ + --enable-demuxer=mpegts \ + --enable-memalign-hack \ + --cross-prefix=$TOOLCHAIN/bin/arm-linux-androideabi- \ + --target-os=linux \ + --arch=arm \ + --enable-cross-compile \ + --sysroot=$SYSROOT \ + --extra-cflags="-Os -fpic -marm $ADDI_CFLAGS" \ + --extra-ldflags="$ADDI_LDFLAGS" +``` + +### Compilation + +The scariest step is in fact the simplest: + +{{ filename(body="FFmpeg compilation") }} + +```sh +make clean +# Adapt the jobs count to your machine +make -j3 +make install +``` + +### Expose FFmpeg to Android NDK + +To be able to use FFmpeg as a usual NDK module, we need an `Android.mk`. It +should be placed in `$ANDROID_NDK/sources/ffmpeg-2.2.3/android/arm`. + +{{ filename(body="Android.mk") }} + +```make +LOCAL_PATH:= $(call my-dir) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libavdevice +LOCAL_SRC_FILES:= lib/libavdevice.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libavcodec +LOCAL_SRC_FILES:= lib/libavcodec.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libavformat +LOCAL_SRC_FILES:= lib/libavformat.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libswscale +LOCAL_SRC_FILES:= lib/libswscale.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libavutil +LOCAL_SRC_FILES:= lib/libavutil.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libavfilter +LOCAL_SRC_FILES:= lib/libavfilter.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) + +include $(CLEAR_VARS) +LOCAL_MODULE:= libswresample +LOCAL_SRC_FILES:= lib/libswresample.a +LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include +include $(PREBUILT_STATIC_LIBRARY) +``` + +That's it! FFmpeg is ready to use! + +## Using FFmpeg + +To use FFmpeg, I'll stick to [halfninja](https://github.com/halfninja)'s +idea: adapt FFmpeg's `main()` to a simple function, and write a JNI +interface around it. A sample project is available on +[GitHub](https://github.com/Kernald/ffmpeg-android). + +### Adapting FFmpeg's `main()` + +I used some FFmpeg's executable source files (`ffmpeg.c`, containing `main()`, +and directly related ones), and tweaked them: removed every `exit()` call and +replaced `av_log()` calls to use Android's LogCat. As FFmpeg's executable is +meant to be run once, then exited, I also needed to reinitialize some static +variables between every `main()` calls. + + + +_Update from March 27th 2016_: for an up-to-date sample, see +[this GitHub repository](https://github.com/HikoQiu/JNI_INVOKE_FFMPEG/blob/master/jni/ffmpeg.c#L4122). +Thanks Hiko! + + +### JNI interface + +The JNI interface is really simple: a simple C wrapper calling FFmpeg's +`main()`, and a Java wrapper around it. 
+
+Here's the C function, excluding the usual JNI boilerplate (the complete file
+is available on GitHub):
+
+{{ filename(body="JNI C wrapper") }}
+
+```c
+JNIEXPORT jboolean JNICALL Java_fr_enoent_videokit_Videokit_run(JNIEnv *env, jobject obj, jobjectArray args) {
+    int i = 0;
+    int argc = 0;
+    char **argv = NULL;
+    jstring *strr = NULL;
+
+    if (args != NULL) {
+        argc = (*env)->GetArrayLength(env, args);
+        argv = (char **) malloc(sizeof(char *) * argc);
+        strr = (jstring *) malloc(sizeof(jstring) * argc);
+
+        for (i = 0; i < argc; ++i) {
+            strr[i] = (jstring)(*env)->GetObjectArrayElement(env, args, i);
+            argv[i] = (char *)(*env)->GetStringUTFChars(env, strr[i], 0);
+            LOGI("Option: %s", argv[i]);
+        }
+    }
+
+    LOGI("Running main");
+    int result = main(argc, argv);
+    LOGI("Main ended with status %d", result);
+
+    for (i = 0; i < argc; ++i) {
+        (*env)->ReleaseStringUTFChars(env, strr[i], argv[i]);
+    }
+    free(argv);
+    free(strr);
+
+    return result == 0;
+}
+```
+
+The function simply takes the JNI arguments (`jobject obj` and `jobjectArray
+args`) and creates matching `char*` parameters. These parameters are then
+passed to FFmpeg's `main()`. It then returns `true` if everything went fine
+(FFmpeg returned `0`), `false` otherwise.
+
+The Java part is even simpler. Once again, only the interesting part:
+
+{{ filename(body="JNI Java wrapper") }}
+
+```java
+package fr.enoent.videokit;
+
+public final class Videokit {
+
+    // Truncated library loading, see complete file on GitHub
+
+    /**
+     * Call FFmpeg with specified arguments
+     * @param args FFmpeg arguments
+     * @return true if success, false otherwise
+     */
+    public boolean process(String[] args) {
+        String[] params = new String[args.length + 1];
+        params[0] = "ffmpeg";
+        System.arraycopy(args, 0, params, 1, args.length);
+
+        return run(params);
+    }
+
+    private native boolean run(String[] args);
+}
+```
+
+The native `run()` method is pretty obvious: it simply calls the previous C
+function. However, FFmpeg's `main()` expects to see the executable name as its
+first parameter. Even if we don't compile it as an executable file, I found it
+simpler to add this parameter than to modify FFmpeg's code to not use it.
+Hence the `process()` method, which is the only public interface to call
+FFmpeg. It simply adds `ffmpeg` as the first parameter, then calls `run()`.
+
+### Call FFmpeg from Java
+
+Once we have the JNI wrapper in place, calling FFmpeg from Java code is really
+straightforward. Here's a sample call which trims the video available at
+`/sdcard/input.mp4` to keep only 15 seconds of it, and writes the result to
+`/sdcard/output.mp4`:
+
+{{ filename(body="Using FFmpeg") }}
+
+```java
+if (Videokit.getInstance().process(new String[] {
+        "-y",       // Overwrite output files
+        "-i",       // Input file
+        "/sdcard/input.mp4",
+        "-ss",      // Start position
+        "0",
+        "-t",       // Duration
+        "15",
+        "-vcodec",  // Video codec
+        "copy",
+        "-acodec",  // Audio codec
+        "copy",
+        "/sdcard/output.mp4" // Output file
+})) {
+    Log.d(TAG, "Trimming: success");
+} else {
+    Log.d(TAG, "Trimming: failure");
+}
+```
+
+## Conclusion
+
+While using FFmpeg on Android is really useful when dealing with audio and
+video files, it wasn't as easy as one could think to get it working the first
+time with an up-to-date FFmpeg version. However, once set up, it works great,
+with decent performance even on mid-range hardware.
diff --git a/content/posts/creating-a-blog-with-bazel/01-a-new-beginning/index.md b/content/posts/creating-a-blog-with-bazel/01-a-new-beginning/index.md
new file mode 100644
index 0000000..0a69e00
--- /dev/null
+++ b/content/posts/creating-a-blog-with-bazel/01-a-new-beginning/index.md
@@ -0,0 +1,36 @@
++++
+template = "article.html"
+title = "A new beginning"
+date = 2019-10-31T21:05:00+11:00
+description = "Resurrecting an inactive blog by migrating from Octopress to Hugo, and embarking on a journey to build everything with Bazel."
+
+[taxonomies]
+tags = ["bazel"]
++++
+
+This blog has been inactive for a long time. I tried to at least post an
+article yearly, and next thing you know, two and a half years fly by...
+Halloween seemed like a good time to resurrect it.
+
+I wanted to start writing again recently, and faced an issue: this blog was
+using Octopress 2. Well, Octopress has [apparently been dead for even longer
+than this blog](http://octopress.org/). So I wanted to switch to another static
+site generator. I found [Hugo](https://gohugo.io/), which is actively
+maintained and ticked all the boxes I had, so that's what I settled on (sorry
+for the probable RSS feed mess - while I set up 301 redirects for the old
+articles, I guess this won't play nicely with any RSS reader. This is actually
+what prompted this article...)
+
+This could have been an hour's worth of work - migrating the content (both
+Hugo and Octopress are using Markdown, so that part was really simple), finding
+or putting together a nice template, and calling it a day. But how fun is that?
+Instead, I chose to go with the most complex (hence fun, right?) approach
+possible. And that was by using [Bazel](https://bazel.build/) to do
+_everything_. Sass linting and pre-processing, HTML generation, generating a
+Docker image, deploying it... and with tests for a lot of things along the way.
+Today, the deployment part is still missing (I'm working on it), but everything
+else is pretty much ready.
+
+I plan to describe this whole journey soon, although I don't know exactly which
+form it will take yet - probably a series of small articles, each covering a
+specific aspect. In the meantime, welcome back to a brand-new blog!
diff --git a/content/posts/creating-a-blog-with-bazel/02-compiling-a-kotlin-application-with-bazel/index.md b/content/posts/creating-a-blog-with-bazel/02-compiling-a-kotlin-application-with-bazel/index.md
new file mode 100644
index 0000000..639bfd4
--- /dev/null
+++ b/content/posts/creating-a-blog-with-bazel/02-compiling-a-kotlin-application-with-bazel/index.md
@@ -0,0 +1,639 @@
++++
+template = "article.html"
+title = "Compiling a Kotlin application with Bazel"
+date = 2019-12-08T11:30:00+11:00
+description = "A comprehensive guide to building Kotlin applications with Bazel, including dependency management, testing, and static analysis with Detekt and Ktlint."
+
+[taxonomies]
+tags = ["bazel", "kotlin"]
++++
+
+This post will describe how to compile a small application written in Kotlin
+using [Bazel](https://bazel.build), how to test it, and how to use static
+analyzers on it.
+
+## Phosphorus
+
+Phosphorus is the application that this post will cover. It's a small utility
+that I wrote to check if an image matches a reference. If it doesn't,
+Phosphorus generates an image highlighting the differences. The goal is to be
+able to check that something generates an image in a given way, and doesn't
+change - at least not unexpectedly. The actual usage will be covered later in
+this series.
+While it's not open-source yet, it's something I intend to do at some point.
+
+It's written in Kotlin, has a couple of external dependencies
+([Clikt](https://ajalt.github.io/clikt/) and [Dagger](https://dagger.dev/)), as
+well as a few tests. This is the structure:
+
+{% mermaid(caption="Phosphorus's class diagram") %}
+classDiagram
+    namespace loader {
+        class ImageLoader {
+            <<interface>>
+        }
+        class ImageIoLoader {
+        }
+    }
+
+    namespace differ {
+        class ImageDiffer {
+            <<interface>>
+        }
+        class ImageDifferImpl {
+        }
+    }
+
+    namespace data {
+        class Image
+        class DiffResult
+    }
+
+    class Phosphorus
+
+    ImageIoLoader ..|> ImageLoader
+    ImageDifferImpl ..|> ImageDiffer
+    Phosphorus --> ImageLoader
+    Phosphorus --> ImageDiffer
+{% end %}
+
+The `differ` module contains the core logic - comparing two images, and
+generating a `DiffResult`. This `DiffResult` contains both the straightforward
+result of the comparison (are the two images identical?) and an image
+highlighting the differences, if any. The `loader` package is responsible for
+loading and writing images. Finally, the `Phosphorus` class orchestrates all
+that, in addition to processing command line arguments with Clikt.
+
+## Dependencies
+
+Phosphorus has two dependencies: Clikt and Dagger. Both of them are available
+as Maven artifacts. In order to pull Maven artifacts, the Bazel team provides a
+set of rules called
+[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external/). The
+idea is the following: you list a bunch of Maven coordinates and repositories,
+the rules will fetch all of them (and their transitive dependencies) during the
+loading phase, and generate Bazel targets corresponding to those Maven
+artifacts, on which you can depend. Let's see how we can use them. The first
+step is to load the rules, in the `WORKSPACE`:
+
+```python
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+    name = "rules_jvm_external",
+    sha256 = "62133c125bf4109dfd9d2af64830208356ce4ef8b165a6ef15bbff7460b35c3a",
+    strip_prefix = "rules_jvm_external-3.0",
+    url = "https://github.com/bazelbuild/rules_jvm_external/archive/3.0.zip",
+)
+```
+
+Then, we can load and call `maven_install` with the list of Maven coordinates
+we want, in the `WORKSPACE` too:
+
+```python
+load("@rules_jvm_external//:defs.bzl", "maven_install")
+
+maven_install(
+    artifacts = [
+        "com.github.ajalt:clikt:2.2.0",
+        "com.google.dagger:dagger:2.25.2",
+        "com.google.dagger:dagger-compiler:2.25.2",
+        "com.google.truth:truth:1.0",
+        "javax.inject:javax.inject:1",
+        "junit:junit:4.12",
+    ],
+    fetch_sources = True,
+    repositories = [
+        "https://maven.google.com",
+        "https://repo1.maven.org/maven2",
+        "https://jcenter.bintray.com/",
+    ],
+    strict_visibility = True,
+)
+```
+
+A couple of things to note:
+
+- We're also downloading [JUnit](https://junit.org/junit4/) and
+[Truth](https://truth.dev/), which we're going to use in tests
+- `maven_install` can try to download the sources, if they're available on
+Maven, so you can browse them directly from the IDE
+
+At this point, Clikt, JUnit and Truth are ready to be used. They are exposed
+respectively as `@maven//:com_github_ajalt_clikt`, `@maven//:junit_junit` and
+`@maven//:com_google_truth_truth`.
+
+Dagger, on the other hand, comes with an annotation processor and, as such,
+needs some more work: it needs to be exposed as a Java plugin.
+Because it's a third-party dependency, this will be defined in
+`//third_party/dagger/BUILD`:
+
+```python
+java_plugin(
+    name = "dagger_plugin",
+    processor_class = "dagger.internal.codegen.ComponentProcessor",
+    deps = [
+        "@maven//:com_google_dagger_dagger_compiler",
+    ],
+)
+
+java_library(
+    name = "dagger",
+    exported_plugins = [":dagger_plugin"],
+    visibility = ["//visibility:public"],
+    exports = [
+        "@maven//:com_google_dagger_dagger",
+        "@maven//:com_google_dagger_dagger_compiler",
+        "@maven//:javax_inject_javax_inject",
+    ],
+)
+```
+
+It can now be used as `//third_party/dagger`.
+
+## Compilation
+
+Bazel doesn't support Kotlin out of the box (the few languages natively
+supported, Java and C++, are currently being extracted from Bazel's core, so
+all languages will soon share a similar integration). In order to compile some
+Kotlin code, we'll have to use some Starlark rules describing how to use
+`kotlinc`. A set of rules is available
+[here](https://github.com/bazelbuild/rules_kotlin/). While they don't support
+Kotlin/Native, they do support targeting both the JVM (including Android) and
+JavaScript.
+
+In order to use those rules, we need to declare them in the `WORKSPACE`:
+
+```python
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+    name = "io_bazel_rules_kotlin",
+    sha256 = "54678552125753d9fc0a37736d140f1d2e69778d3e52cf454df41a913b964ede",
+    strip_prefix = "rules_kotlin-legacy-1.3.0-rc3",
+    url = "https://github.com/bazelbuild/rules_kotlin/archive/legacy-1.3.0-rc3.zip",
+)
+
+load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kotlin_repositories", "kt_register_toolchains")
+
+kotlin_repositories()
+
+kt_register_toolchains()
+```
+
+Once that's done, we have access to a few rules:
+
+- `kt_js_library`
+- `kt_js_import`
+- `kt_jvm_binary`
+- `kt_jvm_import`
+- `kt_jvm_library`
+- `kt_jvm_test`
+- `kt_android_library`
+
+We're going to use `kt_jvm_binary`, `kt_jvm_library` as well as `kt_jvm_test`.
+
+As JVM-based languages have a strong correlation between packages and folder
+structure, we need to be careful about where we store our source code. Bazel
+handles a few names as potential Java "roots": `java`, `javatests` and `src`.
+Anything inside a directory named like this needs to follow the package/folder
+correlation. For example, a class `fr.enoent.phosphorus.Phosphorus` can be
+stored at those locations:
+
+- `//java/fr/enoent/phosphorus/Phosphorus.kt`
+- `//tools/images/java/fr/enoent/phosphorus/Phosphorus.kt`
+- `//java/tools/images/src/fr/enoent/phosphorus/Phosphorus.kt`
+
+In my repo, everything Java-related is stored under `//java`, and the
+corresponding tests are in `//javatests` (following the same structure).
+Phosphorus will hence be in `//java/fr/enoent/phosphorus`.
+
+Let's see how we can define a simple Kotlin library, with the `data` module. In
+`//java/fr/enoent/phosphorus/data/BUILD`:
+
+```python
+load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_library")
+
+kt_jvm_library(
+    name = "data",
+    srcs = [
+        "DiffResult.kt",
+        "Image.kt",
+    ],
+    visibility = [
+        "//java/fr/enoent/phosphorus:__subpackages__",
+        "//javatests/fr/enoent/phosphorus:__subpackages__",
+    ],
+)
+```
+
+And that's it, we have our first library ready to be compiled! I won't describe
+all the modules as it's pretty repetitive and there's not a lot of value in
+doing that, but let's see what the main binary looks like.
+Defined in `//java/fr/enoent/phosphorus/BUILD`, we have:
+
+```python
+load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_binary")
+
+kt_jvm_binary(
+    name = "phosphorus",
+    srcs = [
+        "Phosphorus.kt",
+    ],
+    main_class = "fr.enoent.phosphorus.PhosphorusKt",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//java/fr/enoent/phosphorus/differ",
+        "//java/fr/enoent/phosphorus/differ/impl:module",
+        "//java/fr/enoent/phosphorus/loader",
+        "//java/fr/enoent/phosphorus/loader/io_impl:module",
+        "//third_party/dagger",
+        "@maven//:com_github_ajalt_clikt",
+    ],
+)
+```
+
+Note the name of the `main_class`: because `main` is a top-level function, the
+Kotlin compiler puts it in a generated class named after the file, with a `Kt`
+suffix. Once this is defined, we can run Phosphorus with this command:
+
+```
+bazel run //java/fr/enoent/phosphorus -- arguments passed to Phosphorus directly
+```
+
+## Tests
+
+As mentioned previously, the test root will be `//javatests`. Because we need
+to follow the package structure, the tests themselves will be under
+`//javatests/fr/enoent/phosphorus`. They are regular JUnit 4 tests, using Truth
+for the assertions.
+
+Defining unit tests is really straightforward, and closely follows the pattern
+we saw with libraries and binaries. For example, the `ImageTest` test is
+defined like this, in `//javatests/fr/enoent/phosphorus/data/BUILD`:
+
+```python
+load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_test")
+
+kt_jvm_test(
+    name = "ImageTest",
+    srcs = ["ImageTest.kt"],
+    deps = [
+        "//java/fr/enoent/phosphorus/data",
+        "@maven//:com_google_truth_truth",
+        "@maven//:junit_junit",
+    ],
+)
+```
+
+This will define a Bazel target that we can invoke like this:
+
+```
+bazel test //javatests/fr/enoent/phosphorus/data:ImageTest
+```
+
+If everything went well, the output should look like this:
+
+```
+//javatests/fr/enoent/phosphorus/data:ImageTest PASSED in 0.3s
+```
+
+Once this is done, it's possible to run
+`ibazel test //javatests/fr/enoent/phosphorus/...` - Bazel will then monitor
+all the test targets defined under that path, as well as their dependencies,
+and re-run all the affected tests as soon as something is edited. Because Bazel
+encourages small build targets, has some great caching, and the Kotlin compiler
+uses a persistent worker, the feedback loop is really quick.
+
+## Static analysis
+
+For Kotlin, two tools are quite useful:
+[Detekt](https://arturbosch.github.io/detekt/) and
+[Ktlint](https://ktlint.github.io/). The idea for running them will be really
+similar: have two supporting test targets for each actual Kotlin target,
+running Detekt and Ktlint on its sources. In order to do that easily, we'll
+define some wrappers around the `kt_jvm_*` set of rules. Those wrappers will be
+responsible for generating the two supporting test targets, as well as calling
+the original `kt_jvm_*` rule. The resulting macro will be entirely transparent
+to use, the only difference being the `load` call.
+
+Let's see what those macros could look like.
+In `//java/rules/defs.bzl`:
+
+```python
+load(
+    "@io_bazel_rules_kotlin//kotlin:kotlin.bzl",
+    upstream_kt_jvm_binary = "kt_jvm_binary",
+    upstream_kt_jvm_library = "kt_jvm_library",
+    upstream_kt_jvm_test = "kt_jvm_test",
+)
+
+def kt_jvm_binary(name, srcs, **kwargs):
+    upstream_kt_jvm_binary(
+        name = name,
+        srcs = srcs,
+        **kwargs
+    )
+
+    _common_tests(name = name, srcs = srcs)
+
+def kt_jvm_library(name, srcs, **kwargs):
+    upstream_kt_jvm_library(
+        name = name,
+        srcs = srcs,
+        **kwargs
+    )
+
+    _common_tests(name = name, srcs = srcs)
+
+def kt_jvm_test(name, srcs, size = "small", **kwargs):
+    upstream_kt_jvm_test(
+        name = name,
+        srcs = srcs,
+        size = size,
+        **kwargs
+    )
+
+    _common_tests(name = name, srcs = srcs)
+
+def _common_tests(name, srcs):
+    # This will come soon, no-op for now
+    pass
+```
+
+With those wrappers defined, we need to actually call them. Because we're
+following the same signature and name as the upstream rules, we just need to
+update our `load` calls in the different `BUILD` files.
+`load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_test")` will become
+`load("//java/rules:defs.bzl", "kt_jvm_test")`, and so on. `_common_tests` will
+be responsible for calling Detekt and Ktlint, let's see how.
+
+### Detekt
+
+[Artem Zinnatullin](https://twitter.com/artem_zin) published a
+[set of rules](https://github.com/buildfoundation/bazel_rules_detekt/) to run
+Detekt a week before I started writing this, making things way easier. As
+usual, let's start by loading this in the `WORKSPACE`:
+
+```python
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
+
+http_file(
+    name = "detekt_cli_jar",
+    sha256 = "e9710fb9260c0824b3a9ae7d8326294ab7a01af68cfa510cab66de964da80862",
+    urls = ["https://jcenter.bintray.com/io/gitlab/arturbosch/detekt/detekt-cli/1.2.0/detekt-cli-1.2.0-all.jar"],
+)
+
+http_archive(
+    name = "rules_detekt",
+    sha256 = "f1632c2492291f5144a5e0f5e360a094005e20987518d228709516cc935ad1a1",
+    strip_prefix = "bazel_rules_detekt-0.2.0",
+    url = "https://github.com/buildfoundation/bazel_rules_detekt/archive/v0.2.0.zip",
+)
+```
+
+This exposes a rule named `detekt`, which defines a build target generating the
+Detekt report. While there are a few options, we'll keep things simple. This is
+what a basic invocation looks like, in any `BUILD` file:
+
+```python
+detekt(
+    name = "detekt_report",
+    srcs = glob(["**/*.kt"]),
+)
+```
+
+We can integrate that in our `_common_tests` macro, to generate a Detekt target
+automatically for every Kotlin target:
+
+```python
+def _common_tests(name, srcs):
+    detekt(
+        name = "%s_detekt_report" % name,
+        srcs = srcs,
+        config = "//java/rules/internal:detekt-config.yml",
+    )
+```
+
+All our Kotlin targets now have a `$name_detekt_report` target generated
+automatically, using a common Detekt configuration.
+
+The way this `detekt` rule works is by creating a build target that generates
+the report. Which means that it's not actually a test - and a test is what we
+were trying to achieve. In order to get one, we can use
+[Bazel Skylib](https://github.com/bazelbuild/bazel-skylib)'s `build_test`. This
+test rule generates a test target that just has a dependency on other targets -
+if any of those dependencies fails to build, then the test fails. Otherwise, it
+passes.
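+
+`build_test` comes from Bazel Skylib, which is declared like the other rule
+sets. A minimal sketch (the version here is illustrative - pick a release and
+its real `sha256` from the Skylib releases page), in the `WORKSPACE`:
+
+```python
+http_archive(
+    name = "bazel_skylib",
+    # Illustrative release; add the matching sha256 from the releases page
+    url = "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
+)
+```
+
+and, in `defs.bzl`:
+
+```python
+load("@bazel_skylib//rules:build_test.bzl", "build_test")
+```
+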
Our macro becomes:

```python
def _common_tests(name, srcs):
    detekt(
        name = "%s_detekt_report" % name,
        srcs = srcs,
        config = "//java/rules/internal:detekt-config.yml",
    )

    build_test(
        name = "%s_detekt_test" % name,
        targets = [":%s_detekt_report" % name],
    )
```

And there we have it - a `$name_detekt_test` that is actually a test, and will
fail if Detekt raises errors.

### Ktlint

Ktlint doesn't have any existing open-source Bazel rules. Let's see how we can
write our own minimal one. It will take as inputs the list of files to check, as
well as an optional [editorconfig](https://editorconfig.org/) configuration,
which Ktlint supports natively.

The definition of the rules will be split across three files: two internal files
defining respectively the _action_ (how to invoke Ktlint) and the _rule
interface_ (what's its name, its arguments...), as well as a third, public file,
meant to be consumed by users.

Let's start by downloading Ktlint itself. In the `WORKSPACE`, as usual:

```python
http_file(
    name = "com_github_pinterest_ktlint",
    executable = True,
    sha256 = "a656342cfce5c1fa14f13353b84b1505581af246638eb970c919fb053e695d5e",
    urls = ["https://github.com/pinterest/ktlint/releases/download/0.36.0/ktlint"],
)
```

Let's move on to the action definition. It's a simple macro returning a string,
which defines how to invoke Ktlint, given some arguments. In
`//tools/ktlint/internal/actions.bzl`:

```python
def ktlint(ctx, srcs, editorconfig):
    """Generates a test action linting the input files.

    Args:
        ctx: analysis context.
        srcs: list of source files to be checked.
        editorconfig: editorconfig file to use (optional)

    Returns:
        A script running ktlint on the input files.
    """

    args = []

    if editorconfig:
        args.append("--editorconfig={file}".format(file = editorconfig.short_path))

    for f in srcs:
        args.append(f.path)

    return "{linter} {args}".format(
        linter = ctx.executable._ktlint_tool.short_path,
        args = " ".join(args),
    )
```

Pretty straightforward - we combine Ktlint's executable path, the editorconfig
file if it's provided, and the list of source files.

Now for the rule interface, we will define a rule named `ktlint_test`. Building
a `ktlint_test` target will mean generating a shell script that invokes Ktlint
with the given set of arguments, and running it will invoke that script - hence
running Ktlint as well.
In `//tools/ktlint/internal/rules.bzl`:

```python
load(":actions.bzl", "ktlint")

def _ktlint_test_impl(ctx):
    script = ktlint(
        ctx,
        srcs = ctx.files.srcs,
        editorconfig = ctx.file.editorconfig,
    )

    ctx.actions.write(
        output = ctx.outputs.executable,
        content = script,
    )

    files = [ctx.executable._ktlint_tool] + ctx.files.srcs

    if ctx.file.editorconfig:
        files.append(ctx.file.editorconfig)

    return [
        DefaultInfo(
            runfiles = ctx.runfiles(
                files = files,
            ).merge(ctx.attr._ktlint_tool[DefaultInfo].default_runfiles),
            executable = ctx.outputs.executable,
        ),
    ]

ktlint_test = rule(
    _ktlint_test_impl,
    attrs = {
        "srcs": attr.label_list(
            allow_files = [".kt", ".kts"],
            doc = "Source files to lint",
            mandatory = True,
            allow_empty = False,
        ),
        "editorconfig": attr.label(
            doc = "Editor config file to use",
            mandatory = False,
            allow_single_file = True,
        ),
        "_ktlint_tool": attr.label(
            default = "@com_github_pinterest_ktlint//file",
            executable = True,
            cfg = "target",
        ),
    },
    doc = "Lint Kotlin files, and fail if the linter raises errors.",
    test = True,
)
```

We have two different parts here - the definition of the interface, with the
call to `rule`, and the implementation of that rule, defined as
`_ktlint_test_impl`.

The call to `rule` defines how this rule can be invoked. We define that it
requires a list of `.kt` and/or `.kts` files named `srcs`, an optional file
named `editorconfig`, as well as a hidden argument named `_ktlint_tool`, which
is just a helper for us to reference the Ktlint binary - to which we pass the
file we defined in the `WORKSPACE` earlier.

The actual implementation works in multiple steps:

1. It invokes the `ktlint` action we defined earlier, to generate the script
that will be invoked.
2. It generates an action to write that script to a file referred to as
`ctx.outputs.executable` (Bazel knows how to handle it and what to do with it -
we don't need to worry about where it is, and it won't be in the source tree
anyway).
3. It computes a list of files that are needed to run this target. This is what
allows Bazel to ensure hermeticity - it will know that this rule needs to be
re-run if any of those files change. If the target runs in a sandboxed
environment (which is the default on most platforms, as far as I'm aware), only
those files will be available.
4. It returns a `Provider`, responsible for holding a description of what this
target needs.

Finally, we write a file that only exposes the bits users should care about.
It's not mandatory, but it draws a clear boundary between what is an
implementation detail and what users can actually rely on. In
`//tools/ktlint/defs.bzl`:

```python
load(
    "//tools/ktlint/internal:rules.bzl",
    _ktlint_test = "ktlint_test",
)

ktlint_test = _ktlint_test
```

We just expose the rule we wrote in `rules.bzl` as `ktlint_test`.

Once this is done, we can use this `ktlint_test` rule where we need it, in our
`_common_tests` macro for Kotlin targets:

```python
def _common_tests(name, srcs):
    ktlint_test(
        name = "%s_ktlint_test" % name,
        srcs = srcs,
        editorconfig = "//:.editorconfig",
    )

    detekt(
        name = "%s_detekt_report" % name,
        srcs = srcs,
        config = "//java/rules/internal:detekt-config.yml",
    )

    build_test(
        name = "%s_detekt_test" % name,
        targets = [":%s_detekt_report" % name],
    )
```

And there we have it - all our Kotlin targets have both Detekt and Ktlint test
targets.
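
For example, with the `phosphorus` binary defined earlier, the wrappers should
now give us two extra targets, following the `%s_ktlint_test` and
`%s_detekt_test` naming scheme from the macro, which we can invoke directly:

```
bazel test //java/fr/enoent/phosphorus:phosphorus_ktlint_test \
    //java/fr/enoent/phosphorus:phosphorus_detekt_test
```
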
Because we're exposing those as Bazel targets, we automatically benefit
from Bazel's caching and remote execution capabilities - those linters won't
re-run if the inputs didn't change, and can run remotely, with Bazel being aware
of which files are needed on the remote machine.

## Closing thoughts

But what's the link between generating a blog with Bazel and compiling a Kotlin
application? Well, almost none, but there is one. The class diagram included
earlier in this article is generated with a tool called
[PlantUML](http://plantuml.com/), which generates images from a text
representation of a graph. The next article in this series will talk about
integrating this tool into Bazel (in a similar way to what we did with Ktlint),
but also about how to test the Bazel rule. And to have some integration tests,
Phosphorus will come in handy!

diff --git a/content/posts/creating-a-blog-with-bazel/03-why-bazel/index.md b/content/posts/creating-a-blog-with-bazel/03-why-bazel/index.md
new file mode 100644
index 0000000..a9c21c2
--- /dev/null
+++ b/content/posts/creating-a-blog-with-bazel/03-why-bazel/index.md
@@ -0,0 +1,234 @@
++++
+template = "article.html"
+title = "Why Bazel?"
+date = 2019-11-02T18:00:00+11:00
+description = "An overview of Bazel's core concepts, from hermetic builds and reproducibility to extensibility and its three-phase build system."
+
+[taxonomies]
+tags = ["bazel"]
++++

In this post, we'll cover what [Bazel](https://bazel.build) is, how to use it,
and why I chose to use it.

## What is Bazel?

Bazel is a build system released by Google in 2015. It's derived from
[Blaze](https://mike-bland.com/2012/10/01/tools.html#blaze-forge-srcfs-objfs),
the internal build system Google uses for most of its own code-base.

### Building at scale

Bazel has a huge focus on hermetic builds and reproducibility. Every build step
is, from a really broad perspective, defined as a list of inputs, tools, and
outputs. This allows for efficient and robust caching (if neither the inputs nor
the tools changed, then this target doesn't need to be rebuilt, and this
cascades through the whole build graph). Let's see a sample definition of a C++
library, as well as a C++ binary depending on it:

*BUILD*

```python
cc_library(
    name = "my_feature",
    srcs = [
        "feature_impl.cpp",
        "utils.cpp",
    ],
    hdrs = [
        "feature.hpp",
        "utils.hpp",
    ],
)

cc_binary(
    name = "my_app",
    srcs = ["main.cpp"],
    deps = [
        ":my_feature",
    ],
)
```

`cc_library` and `cc_binary` both have an implicit dependency on a C++
toolchain (I won't go into any language-specific features in this post, but
if you don't tell Bazel to use a specific C++ toolchain, it will try to use your
system compiler - which is convenient, but loses a bit of hermeticity and
reproducibility). Everything else is pretty obvious here: we defined two
different build targets, one of them being a library called `my_feature`, and
the other one a binary called `my_app`, depending on `my_feature`. If we build
`my_app`, Bazel will automatically build `my_feature` first as you would expect,
and then proceed to build `my_app`. If you change `main.cpp` and re-build
`my_app`, it will skip the compilation of `my_feature` entirely, as nothing
changed.

Bazel's cache handling is really reliable.
During the past few months, I've done
a lot of diverse things (writing my own rules, compiling a bunch of different
languages, depending on third-party libraries and rules...), and never once had
to run `bazel clean`. Now, I haven't used a lot of other build systems in the
recent past, but coming from [Gradle](https://gradle.org/) for Android, this
feels really weird.

### Integrating tools and other languages

Another great aspect of Bazel is its extensibility. It works with rules defined
in a language called [Starlark](https://github.com/bazelbuild/starlark), whose
syntax is a subset of Python's. It comes without a lot of standard Python
features, such as I/O, mutable collections, or anything else that could affect
build hermeticity. While this isn't the focus of this article (I will cover
writing a rule to run a simple tool in a later article), here is what an
example rule can look like (from
[Bazel's samples](https://github.com/bazelbuild/examples/blob/master/rules/shell_command/rules.bzl)):

*rules.bzl*

```python
def _convert_to_uppercase_impl(ctx):
    # Both the input and output files are specified by the BUILD file.
    in_file = ctx.file.input
    out_file = ctx.outputs.output
    ctx.actions.run_shell(
        outputs = [out_file],
        inputs = [in_file],
        arguments = [in_file.path, out_file.path],
        command = "tr '[:lower:]' '[:upper:]' < \"$1\" > \"$2\"",
    )
    # No need to return anything telling Bazel to build `out_file` when
    # building this target -- It's implied because the output is declared
    # as an attribute rather than with `declare_file()`.

convert_to_uppercase = rule(
    implementation = _convert_to_uppercase_impl,
    attrs = {
        "input": attr.label(
            allow_single_file = True,
            mandatory = True,
            doc = "The file to transform",
        ),
        "output": attr.output(doc = "The generated file"),
    },
    doc = "Transforms a text file by changing its characters to uppercase.",
)
```

Once it's defined, it can be reused to define actual build targets in a simple
way:

*BUILD*

```python
load(":rules.bzl", "convert_to_uppercase")

convert_to_uppercase(
    name = "foo_but_uppercase",
    input = "foo.txt",
    output = "upper_foo.txt",
)
```

As a result of this simple extensibility, while Bazel ships only with C++ and
Java support (which are actually getting removed and rewritten in Starlark, to
decouple them from Bazel itself), a lot of rules have been written either by the
Bazel team or by the community, to integrate languages and tools. You can find
rules for [NodeJS](https://github.com/bazelbuild/rules_nodejs),
[Go](https://github.com/bazelbuild/rules_go),
[Rust](https://github.com/bazelbuild/rules_rust),
[packaging](https://github.com/bazelbuild/rules_pkg) (generating debs, zips...),
[generating Docker images](https://github.com/bazelbuild/rules_docker),
[deploying stuff on Kubernetes](https://github.com/bazelbuild/rules_k8s), and a
bunch of other things. And if there are no rules to run/build what you want, you
can write your own!

### A three-phase build

Bazel runs in
[three distinct phases](https://docs.bazel.build/versions/master/guide.html#phases).
Each of them has a specific role, and specific capabilities.

#### Loading

The loading phase parses and evaluates all the `BUILD` files required to
build the requested target(s). This is typically the step during which any
third-party dependency would be fetched (just downloaded and/or extracted,
nothing more yet).
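
As an illustration, an external dependency declared in the `WORKSPACE` like
this is resolved during loading - the archive is downloaded and extracted, but
nothing is built yet (the repository name and URL below are placeholders, not
something this blog actually depends on):

```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    # Hypothetical external repository, for illustration only.
    name = "some_rules",
    # Pinning the sha256 keeps the fetch reproducible.
    sha256 = "...",
    strip_prefix = "some_rules-1.0.0",
    urls = ["https://example.com/some_rules-1.0.0.tar.gz"],
)
```
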
#### Analysis

The second phase validates every build rule involved, generating the actual
build graph. Note that these first two phases are entirely cached, and
if the build graph doesn't change from one build to another (e.g. you just
changed some source files), they will be skipped entirely.

#### Execution

This is the phase that checks for any out-of-date output (either non-existent,
or with changed inputs), and runs the matching actions.

### Great tooling

Bazel comes with some really cool tools. Without spending too much time on that,
here's a list of useful things:

- [ibazel](https://github.com/bazelbuild/bazel-watcher) is a filesystem watcher
  that will rebuild a target as soon as its input files or dependencies
  change.
- [query](https://docs.bazel.build/versions/master/query-how-to.html) is a
  built-in sub-command that helps to analyse the build graph. It's incredibly
  feature-packed.
- [buildozer](https://github.com/bazelbuild/buildtools/tree/master/buildozer) is
  a tool to edit `BUILD` files across a whole repository. It can be used to
  add dependencies to specific targets, change target visibilities, add
  comments...
- [unused_deps](https://github.com/bazelbuild/buildtools/blob/master/unused_deps/README.md)
  detects unused dependencies of Java targets, and displays `buildozer`
  commands to remove them.
- Integration [with](https://github.com/bazelbuild/intellij)
  [different](https://github.com/bazelbuild/vscode-bazel)
  [IDEs](https://github.com/bazelbuild/vim-bazel).
- A set of APIs for remote caching and execution, with
  [a](https://gitlab.com/BuildGrid/buildgrid)
  [few](https://github.com/bazelbuild/bazel-buildfarm)
  [implementations](https://github.com/buildbarn), as well as an upcoming
  service on Google Cloud called Remote Build Execution, leveraging GCP to build
  remotely. The loading and analysis phases still run locally, while the
  execution phase runs remotely.

## Choosing a build system

At the time I started thinking about working on this blog again, I had a small
private repository with a bunch of stuff, all compiled with Bazel. I also
noticed a [set of Starlark rules](https://github.com/stackb/rules_hugo)
integrating [Hugo](https://gohugo.io/). While I didn't need a build system,
Bazel seemed interesting for multiple reasons:

- I could leverage my existing CI system
- While Hugo comes with a bunch of features to e.g. pre-process Sass files, it
  has some kind of lock-in effect. What if I eventually realise that Hugo
  doesn't fit my needs? What's the cost of migrating to a new static site
  generator? The less I rely on Hugo-specific features, the easier this would be
- I could integrate some custom asset pipelines. For example, I could have a
  diagram written with [PlantUML](http://plantuml.com/) or
  [Mermaid](https://mermaidjs.github.io/) and make it part of the Bazel graph,
  as a dependency of this blog
- Bazel would be able to handle packaging and deployment
- It sounded stupid enough to be a fun experiment? (Let's be honest, that's the
  only real reason here.)

## Closing thoughts

Bazel is quite complex, and this article only scratches the surface. The goal
was not to teach you how to use Bazel (there are a lot of existing resources for
that already), but to give a quick overview of the core ideas behind it.
If you found it interesting, here are some useful links:

- Bazel's
  [getting started](https://docs.bazel.build/versions/master/getting-started.html)
- A [list of samples](https://github.com/bazelbuild/examples) using different
  languages as well as defining some rules
- A (non-exhaustive)
  [list of rules](https://docs.bazel.build/versions/master/rules.html), as well
  as the documentation of all the built-in rules

In the next article, we'll see how to build a simple Kotlin app with Bazel, from
scratch all the way to running it.

diff --git a/content/posts/creating-a-blog-with-bazel/04-writing-bazel-rule-set/index.md b/content/posts/creating-a-blog-with-bazel/04-writing-bazel-rule-set/index.md
new file mode 100644
index 0000000..340ca9e
--- /dev/null
+++ b/content/posts/creating-a-blog-with-bazel/04-writing-bazel-rule-set/index.md
@@ -0,0 +1,501 @@
++++
+template = "article.html"
+title = "Writing a Bazel rule set"
+date = 2020-05-16T15:55:00+11:00
+description = "Learn how to write custom Bazel rules by integrating PlantUML, including rule implementation and testing strategies."
+
+[taxonomies]
+tags = ["bazel", "plantuml"]
++++

This post will cover two things:

- How to run an arbitrary tool with Bazel (in this case,
[PlantUML](https://plantuml.com/), a tool to generate diagrams), by writing a
rule set
- How to test this rule set.

It should be mentioned that while I was working on this rule set, it became more
and more apparent that PlantUML is not a great candidate for this kind of
integration, as its output is platform-dependent (the font rendering). Despite
that, it's still a simple tool, and as such its integration is simple, albeit
not perfect (the rendering tests I wrote need to run on the same platform every
time).

## PlantUML usage

PlantUML is a tool that takes a text input looking like this:

```
@startuml
Alice -> Bob: SYN
@enduml
```

And outputs an image looking like this:

{% mermaid(caption="PlantUML sample output") %}
sequenceDiagram
    Alice->>Bob: SYN
{% end %}

PlantUML has multiple ways of being invoked (CLI, GUI, as well as a _lot_ of
integrations with different tools), but we'll go with the easiest: a one-shot
CLI invocation. It takes as inputs:

- A text file, representing a diagram
- An optional configuration file, giving control over the output

It then outputs a single image file, which can be of different formats (we'll
just cover SVG and PNG in this article, but adding support for other formats is
trivial).

PlantUML ships as a JAR file, which needs to be run with Java. An invocation
generating the sample image above would look like this:

```bash
java -jar plantuml.jar -tpng -p < 'mysource.puml' > 'dir/myoutput.png'
```

Pretty straightforward: run the JAR, with a single option for the image type,
pipe the content of the input file and get the output file back. The `-p` flag
is the short form of `-pipe`, which we're using because pipes are the only way
of properly controlling the output path (without that, PlantUML tries to be
smart and places the output next to the input).

With a configuration file:

```bash
java -jar plantuml.jar -tpng -config config.puml -p < 'mysource.puml' > 'dir/myoutput.png'
```

Simple enough, right? Well, not really. PlantUML actually embeds some
metadata in the files it generates. For example, when generating an SVG:

```svg
<!-- The generated SVG embeds metadata in a comment like this one:
the full diagram source, and the PlantUML version used to render it. -->
```

This makes PlantUML non-hermetic by default (in addition to the fonts issue
mentioned earlier).
While PlantUML has a simple way of working around that (in
the form of a `-nometadata` flag), this is something to keep in mind when
integrating a tool with Bazel: is this tool usable in a hermetic way? If not,
how can we minimise the impact of this non-hermeticity?

From there, here is the invocation we'll work with:

```bash
java -jar plantuml.jar -tpng -nometadata -config config.puml \
    -p < 'mysource.puml' > 'dir/myoutput.png'
```

## Getting PlantUML

PlantUML is a Java application, available as a JAR on Maven. As such, it can be
fetched with the help of
[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external/), as was
explained in
[a previous article](@/posts/creating-a-blog-with-bazel/02-compiling-a-kotlin-application-with-bazel/index.md#dependencies).
The Maven rules will expose the JAR as a library, but we need a binary to be
able to run it. In e.g. `//third_party/plantuml/BUILD`:

```python
load("@rules_java//java:defs.bzl", "java_binary")

java_binary(
    name = "plantuml",
    main_class = "net.sourceforge.plantuml.Run",
    visibility = ["//visibility:public"],
    runtime_deps = [
        "@maven//:net_sourceforge_plantuml_plantuml",
    ],
)
```

From there, we can use `//third_party/plantuml` like any Bazel binary target -
we can run it with `bazel run`, and we can pass it as a tool for rule actions.

This is a pattern that works well for any JVM-based tool. Other kinds of tools
will need a different preparation step to make them available through Bazel -
but as long as you can get a binary, you should be good.

## Rule set structure

This rule set will follow the same structure we previously used for
[Ktlint](@/posts/creating-a-blog-with-bazel/02-compiling-a-kotlin-application-with-bazel/index.md#ktlint):

- Based in `//tools/plantuml`
- A public interface exposed in `//tools/plantuml/defs.bzl`
- Internal actions definition in `//tools/plantuml/internal/actions.bzl`
- Internal rule definition in `//tools/plantuml/internal/rules.bzl`

But in addition:

- Tests for the actions in `//tools/plantuml/internal/actions_test.bzl`
- Integration tests in `//tools/plantuml/tests`

Let's start by defining our actions.

## Actions

### Implementation

We need only one action for our rule: one that takes a source file, an optional
configuration file, and the PlantUML binary, and emits the output file by
calling PlantUML. Let's assume for a moment that we have a helper function,
`plantuml_command_line`, which, given the proper inputs, returns the PlantUML
command line to run, and write the action from there (the `shell` helper comes
from Skylib):

```python
load("@bazel_skylib//lib:shell.bzl", "shell")

def plantuml_generate(ctx, src, format, config, out):
    """Generates a single PlantUML graph from a puml file.

    Args:
        ctx: analysis context.
        src: source file to be read.
        format: the output image format.
        config: the configuration file. Optional.
        out: output image file.
    """
    command = plantuml_command_line(
        executable = ctx.executable._plantuml_tool.path,
        config = config.path if config else None,
        src = src.path,
        output = out.path,
        output_format = format,
    )

    inputs = [src]

    if config:
        inputs.append(config)

    ctx.actions.run_shell(
        outputs = [out],
        inputs = inputs,
        tools = [ctx.executable._plantuml_tool],
        command = command,
        mnemonic = "PlantUML",
        progress_message = "Generating %s" % out.basename,
    )
```

This is pretty straightforward: we generate the command line, passing the
attributes' respective paths (or `None` for the configuration file when it's
not provided, since it's optional), as well as the requested image format. We
declare both the source file and the configuration file as inputs, and PlantUML
as a required tool.

Now let's implement our helper function. Here again, it's really
straightforward: it gets a bunch of paths as input, and needs to generate a
command line call (in the form of a simple string) from them:

```python
def plantuml_command_line(executable, config, src, output, output_format):
    """Formats the command line to call PlantUML with the given arguments.

    Args:
        executable: path to the PlantUML binary.
        config: path to the configuration file. Optional.
        src: path to the source file.
        output: path to the output file.
        output_format: image format of the output file.

    Returns:
        A command to invoke PlantUML
    """

    command = "%s -nometadata -p -t%s" % (
        shell.quote(executable),
        output_format,
    )

    if config:
        command += " -config %s" % shell.quote(config)

    command += " < %s > %s" % (
        shell.quote(src),
        shell.quote(output),
    )

    return command
```

An interesting note is that because PlantUML is already integrated as an
executable Bazel target, we don't care whether it's a JAR, a C++ binary or a
shell script: Bazel knows exactly what this executable is made of, how to
prepare (e.g. compile) it if necessary, its runtime dependencies (in this case,
a JRE) and, more importantly in this context, how to run it. We can treat our
tool target as a single executable file, and run it as such just from its path.
Bazel will automatically make sure to provide us with everything we need. (For
more details: the target actually points to a shell script generated by Bazel,
through the Java rules, which in the case of a `java_binary` target is
responsible for defining the classpath, among other things. The JAR file is
merely a dependency of this shell script, and as such is provided as a runtime
dependency.)

Writing this as a helper function rather than directly in the action definition
serves two purposes: not only does it make the whole thing slightly easier to
read, but this function, which contains the logic (even though in this case it's
really simple), is easily testable: it takes only strings as arguments, and
returns a string. It's also a pure function: it doesn't have any side effects,
and as such it will always return the same output given the same set of inputs.

### Tests

To test Starlark functions like this one, Bazel's
[Skylib](https://github.com/bazelbuild/bazel-skylib) provides a test framework
which, while requiring a bit of boilerplate, is pretty simple to use. In this
specific case, we only have two different cases to test: with and without a
configuration file provided.
Error cases should be unreachable due to the way
the rule will be defined: Bazel will be responsible for enforcing the presence
of an executable target for PlantUML's binary, a valid image format... Let's see
how that works. In `//tools/plantuml/internal/actions_test.bzl`:

```python
"""Unit tests for PlantUML action"""

load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load(":actions.bzl", "plantuml_command_line")

def _no_config_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        "'/bin/plantuml' -nometadata -p -tpng < 'mysource.puml' > 'dir/myoutput.png'",
        plantuml_command_line(
            executable = "/bin/plantuml",
            config = None,
            src = "mysource.puml",
            output = "dir/myoutput.png",
            output_format = "png",
        ),
    )
    return unittest.end(env)

no_config_test = unittest.make(_no_config_impl)

def _with_config_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        "'/bin/plantuml' -nometadata -p -tpng -config 'myskin.skin' < 'mysource.puml' > 'dir/myoutput.png'",
        plantuml_command_line(
            executable = "/bin/plantuml",
            config = "myskin.skin",
            src = "mysource.puml",
            output = "dir/myoutput.png",
            output_format = "png",
        ),
    )
    return unittest.end(env)

with_config_test = unittest.make(_with_config_impl)

def actions_test_suite():
    unittest.suite(
        "actions_tests",
        no_config_test,
        with_config_test,
    )
```

First, we define two functions, which contain the actual test logic:
`_no_config_impl` and `_with_config_impl`. Their content is pretty simple: we
start a unit test environment, we invoke the function under test and assert that
the result is indeed what we expected, and we close the unit test environment.
The return value is needed by the test framework, as it carries which assertions
passed or failed.

Next, we declare those two functions as actual unit tests, wrapping them with a
call to `unittest.make`. We can then add those two tests to a test suite, which
is what actually generates the test targets when invoked. This means the
`actions_test_suite` macro needs to be invoked in the `BUILD` file:

```python
load(":actions_test.bzl", "actions_test_suite")

actions_test_suite()
```

We can run our tests, and hopefully everything should pass:

```bash
$ bazel test //tools/plantuml/internal:actions_tests
INFO: Invocation ID: 112bd049-7398-4b23-b62b-1398e9731eb7
INFO: Analyzed 2 targets (5 packages loaded, 927 targets configured).
INFO: Found 2 test targets...
INFO: Elapsed time: 0.238s, Critical Path: 0.00s
INFO: 0 processes.
//tools/plantuml/internal:actions_tests_test_0 PASSED in 0.4s
//tools/plantuml/internal:actions_tests_test_1 PASSED in 0.3s

Executed 0 out of 2 tests: 2 tests pass.
INFO: Build completed successfully, 1 total action
```

## Rules definition

Similarly to the actions definition, we only have one rule to define here. Let's
call it `plantuml_graph()`. It needs our usual set of inputs, and outputs a
single file, whose name will be `${target_name}.{image_format}`. It's also where
we define the set of acceptable image formats, the fact that the input file is
mandatory but the configuration file optional, and the actual executable target
to use for PlantUML. The only thing the implementation actually does is, as
expected, call the `plantuml_generate` action defined above.
```python
load(
    ":actions.bzl",
    "plantuml_generate",
)

def _plantuml_graph_impl(ctx):
    output = ctx.actions.declare_file("{name}.{format}".format(
        name = ctx.label.name,
        format = ctx.attr.format,
    ))
    plantuml_generate(
        ctx,
        src = ctx.file.src,
        format = ctx.attr.format,
        config = ctx.file.config,
        out = output,
    )

    return [DefaultInfo(
        files = depset([output]),
    )]

plantuml_graph = rule(
    _plantuml_graph_impl,
    attrs = {
        "config": attr.label(
            doc = "Configuration file to pass to PlantUML. Useful to tweak the skin",
            allow_single_file = True,
        ),
        "format": attr.string(
            doc = "Output image format",
            default = "png",
            values = ["png", "svg"],
        ),
        "src": attr.label(
            allow_single_file = [".puml"],
            doc = "Source file to generate the graph from",
            mandatory = True,
        ),
        "_plantuml_tool": attr.label(
            default = "//third_party/plantuml",
            executable = True,
            cfg = "host",
        ),
    },
    outputs = {
        "graph": "%{name}.%{format}",
    },
    doc = "Generates a PlantUML graph from a puml file",
)
```

## Public interface

As we only have a single rule, and nothing else specific to do, the public
interface is dead simple:

```python
load("//tools/plantuml/internal:rules.bzl", _plantuml_graph = "plantuml_graph")

plantuml_graph = _plantuml_graph
```

You might then be wondering: why is this useful, and why shouldn't I just import
the rule definition from `//tools/plantuml/internal:rules.bzl` directly? Having
this kind of public interface allows you to tweak the actual rule definition
without breaking any consumer site, as long as you respect the public interface.
You can also add features to every consumer site in a really simple way. Let's
imagine for example that you have a `view_image` rule which, given an image
file, generates a script to view it. You could then transform your public
interface like this:

```python
load("//tools/plantuml/internal:rules.bzl", _plantuml_graph = "plantuml_graph")
load("//tools/utils:defs.bzl", _view_image = "view_image")

def plantuml_graph(name, src, config, format):
    _plantuml_graph(
        name = name,
        src = src,
        config = config,
        format = format,
    )

    _view_image(
        name = "%s.view" % name,
        src = ":%s.%s" % (name, format),
    )
```

And suddenly, all your PlantUML graphs have an implicit `.view` target defined
automatically, allowing you to see the output directly without having to dig
into Bazel's output directories.

A set of Bazel rules for LaTeX actually provides such a feature to view the PDF
output: they have a
[`view_pdf.sh` script](https://github.com/ProdriveTechnologies/bazel-latex/blob/master/view_pdf.sh),
used by their main
[`latex_document` macro](https://github.com/ProdriveTechnologies/bazel-latex/blob/master/latex.bzl#L45).

## Further testing

For a rule this simple, I took just one simple further step: keeping a few
reference PlantUML graphs, as well as their expected rendered output, which I
compare through Phosphorus, a really simple tool I wrote to help compare two
images, covered in the previous article (I told you it would be useful!). But
for more complex cases, Skylib offers more utilities, like an
[analysis test](https://github.com/bazelbuild/bazel-skylib/blob/master/docs/analysis_test_doc.md)
and a
[build test](https://github.com/bazelbuild/bazel-skylib/blob/master/docs/build_test_doc.md).

## Closing thoughts

While writing this kind of tooling might look like a lot of work, it's actually
pretty mechanical in a lot of cases.
I worked on a few others, like
[markdownlint](https://github.com/igorshubovych/markdownlint-cli), which now
runs on all my Markdown files as regular Bazel test targets, or
[pngcrush](https://pmt.sourceforge.io/pngcrush/), which is run on the PNG files
hosted on this blog. In a monorepo, writing such a rule is the kind of task that
you do once, and it just keeps on giving - you can easily compose different
rules around a main use-case, with a bunch of test targets generated virtually
for free.

On another note, I'm aware that having all this in a public repository would
make things much simpler to follow. Sadly, it's part of a larger mono-repository
which makes open-sourcing only the relevant parts tricky. Dumping a snapshot
somewhere would be an option, but I'd rather have an actual living repository.

Now that we have all the tools we need (that was kind of convoluted, I'll give
you that), there are only two steps left to cover:

- Generating the actual blog (ironically enough, this will be a really quick
step, despite being the only really important one)
- Managing the deployment.

We're getting there!

diff --git a/content/posts/creating-a-blog-with-bazel/_index.md b/content/posts/creating-a-blog-with-bazel/_index.md
new file mode 100644
index 0000000..d7c0f9a
--- /dev/null
+++ b/content/posts/creating-a-blog-with-bazel/_index.md
@@ -0,0 +1,11 @@
++++
+title = "Creating a blog with Bazel"
+template = "series.html"
+sort_by = "slug"
+transparent = true
+
+[extra]
+series = true
++++
+
+This series explores building and deploying a blog using Bazel, covering everything from basic Kotlin compilation to writing custom Bazel rules.

diff --git a/content/posts/hosting-different-kinds-of-apps-on-nginx.md b/content/posts/hosting-different-kinds-of-apps-on-nginx.md
new file mode 100644
index 0000000..40d10dd
--- /dev/null
+++ b/content/posts/hosting-different-kinds-of-apps-on-nginx.md
@@ -0,0 +1,464 @@
++++
+template = "article.html"
+title = "Hosting different kinds of apps on nginx"
+date = 2014-10-15T10:55:00+02:00
+description = "An introduction to nginx as a web server and reverse proxy, covering how to host static sites, PHP applications, and Rack applications."
+
+[taxonomies]
+tags = ["nginx", "web"]
++++

## Engine what?

Nginx (engine-x) is a web server and reverse proxy for web and mail protocols
(HTTP, HTTPS, SMTP, POP3 and IMAP). It was first released in 2004, and its
usage has kept growing ever since (according to
[Netcraft](http://news.netcraft.com/archives/2014/08/27/august-2014-web-server-survey.html),
it was hosting 14.47% of active sites in August 2014).

It's capable of hosting many kinds of applications:

- static HTML pages
- PHP, using [PHP-FPM](http://en.wikipedia.org/wiki/PHP#PHPFPM)
- Ruby on Rails and any kind of Rack-based Ruby application, using
  [Phusion Passenger](https://www.phusionpassenger.com/)
- proxying requests to another web server (e.g. software launching its own web
  server, like [Kodi](http://xbmc.org/))

## Set up the bases

The architecture described in this post is pretty simple:

- a default virtual host (vhost) for the top-level domain name, also catching
  requests to unknown sub-domains
- different applications hosted on sub-domains
- some vhosts will be HTTPS-only, some will offer it without being mandatory
- enabling or disabling a vhost must be easy

### Installing nginx

Nginx uses static modules, enabled or disabled at compile-time.
It's important
to decide what you need before installing nginx. The only non-default module
used in this post is Passenger, needed to host Rack-based applications.
Everything else will work without it.

Nginx works on any decent \*nix. It's probably available in your OS
repositories. If it's not, please refer to the
[official installation guide](http://wiki.nginx.org/Install). On Arch Linux,
a package is available on
[AUR](https://aur.archlinux.org/packages/nginx-passenger) including the
Passenger module:

`yaourt -S nginx-passenger`

### Configuration

Once nginx is installed, we need to set up a basic configuration. I'll refer
to the configuration root directory as `$CONFDIR`. It's usually `/etc/nginx/`.

Note that nginx needs to be restarted to reflect any configuration change.

#### Directory structure

To ease the configuration, we'll split it across three folders:

- `$CONFDIR` will contain all the general files (PHP configuration, main nginx
  configuration file…)
- `$CONFDIR/ssl` will contain the SSL certificates
- `$CONFDIR/vhosts` will contain our vhost definitions

#### Main configuration file

Here's the basic configuration file we'll start with:

{{ filename(body="$CONFDIR/nginx.conf") }}

```nginx
worker_processes auto;

events {
    worker_connections 1024;
}

http {
    proxy_send_timeout 600s;
    proxy_read_timeout 600s;
    fastcgi_send_timeout 600s;
    fastcgi_read_timeout 600s;
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 0;
    gzip on;
    index index.html index.htm;
    client_max_body_size 2048m;

    server {
        listen 0.0.0.0;
        server_name enoent.fr;

        access_log /var/log/nginx/localhost.access_log;
        error_log /var/log/nginx/localhost.error_log info;

        root /srv/http/localhost;
    }
}
```

This file sets up an nginx instance with some decent settings (enable gzip, use
`index.html` or `index.htm` as default index pages…), and defines our default
vhost. It answers every request targeting the hostname _enoent.fr_, and serves
static pages found in `/srv/http/localhost`.

## SSL support

As mentioned earlier, we'll have two SSL behaviours depending on the vhost:

- SSL is offered, but not mandatory (the vhost answers to both HTTP and HTTPS)
- SSL is offered, and mandatory (the vhost answers on HTTPS, and redirects to
  HTTPS when it receives a request on HTTP)

We will need two files to define these two behaviours. One of them will have
to be included in every vhost, depending on the SSL policy we want for this
specific vhost.

### Shared configuration

Here we go for the first configuration file:

{{ filename(body="$CONFDIR/ssl_opt.conf") }}

```nginx
ssl_certificate_key /etc/nginx/ssl/ssl-decrypted.key;
add_header Strict-Transport-Security max-age=31536000;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:RC4-SHA:AES256-GCM-SHA384:AES256-SHA256:CAMELLIA256-SHA:ECDHE-RSA-AES128-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:CAMELLIA128-SHA;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
keepalive_timeout 70;
```

You can obviously adapt this file to your specific needs.
It defines:

- the SSL key used (`/etc/nginx/ssl/ssl-decrypted.key`)
- a default max-age header
- a list of accepted SSL ciphers
- session, cache and keepalive durations

The other file will define the exact same settings, adding just one directive
to make SSL mandatory. Instead of copying and pasting all of this, here's what
we can do:

{{ filename(body="$CONFDIR/ssl.conf") }}

```nginx
include ssl_opt.conf;
ssl on;
```

### Enabling SSL for a vhost

To enable SSL on a vhost, we'll need to make three or four modifications to the
vhost definition, depending on the SSL policy.

#### Non-mandatory SSL

If SSL is not mandatory, we'll need to:

- enable listening on port 443 in addition to the default 80
- choose the certificate we want to use
- include the SSL policy file

Here's how it translates, for our first vhost defined earlier:

{{ filename(body="$CONFDIR/nginx.conf (server block only)") }}

```nginx
server {
    listen 0.0.0.0:80;
    listen 0.0.0.0:443 ssl;
    server_name enoent.fr;

    access_log /var/log/nginx/localhost.access_log;
    error_log /var/log/nginx/localhost.error_log info;

    root /srv/http/localhost;

    ssl_certificate /etc/nginx/ssl/enoent.fr.crt;
    include ssl_opt.conf;
}
```

#### Mandatory SSL

If SSL is mandatory, we'll need to:

- enable listening on port 443 __instead of__ the default 80
- choose the certificate we want to use
- include the SSL policy file
- redirect HTTP requests to HTTPS

And here's the result for our first vhost:

{{ filename(body="$CONFDIR/nginx.conf (server block only)") }}

```nginx
server {
    listen 0.0.0.0:80;
    server_name enoent.fr;
    rewrite ^ https://$server_name$request_uri? permanent;
}

server {
    listen 0.0.0.0:443 ssl;
    server_name enoent.fr;

    access_log /var/log/nginx/localhost.access_log;
    error_log /var/log/nginx/localhost.error_log info;

    root /srv/http/localhost;

    ssl_certificate /etc/nginx/ssl/enoent.fr.crt;
    include ssl.conf;
}
```

The first `server` block is here to do the redirection, as our initial server
now only listens on port 443.

## Virtual hosts

As we saw in the [SSL](#ssl-support) part, we can define as many `server` blocks
as we want. Each of them is able to respond to requests targeting different
hostnames or ports. We also saw earlier the `include` directive, allowing us to
include a file in another.

With this in mind, it's pretty simple to set up a pool of vhosts that we can
enable or disable easily. Simply put one file per vhost in a directory, and
include a file to enable the corresponding vhost, or remove the include to
disable it.

Here are some templates for different virtual hosts, each one containing only
the minimum (no SSL-specific settings, for example).

### Static HTML

We already saw earlier how to define a virtual host when we set up our main
`nginx.conf` file:

{{ filename(body="$CONFDIR/vhosts/static_html.conf") }}

```nginx
server {
    listen 0.0.0.0;
    server_name enoent.fr;

    access_log /var/log/nginx/localhost.access_log;
    error_log /var/log/nginx/localhost.error_log info;

    root /srv/http/localhost;
}
```

The only interesting directive here is the `root` one. It maps the root of
the web server to this local folder. A request for
`http://enoent.fr/my_awesome_page.html` will return the content of
`/srv/http/localhost/my_awesome_page.html`.
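
A quick way to check that mapping from another machine (assuming DNS for
_enoent.fr_ already points at this server):

```bash
# -I fetches the response headers only; a "200 OK" confirms that
# our vhost serves the static file.
curl -I http://enoent.fr/my_awesome_page.html
```
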
### Reverse proxy

A reverse proxy may be useful when you have a web server already running, and
want to expose it somewhere else. Let's say we have a NAS on our local network,
its web UI being accessible on `http://nas.local:8080`, and we want to expose it
on `http://nas.enoent.fr`, on the default HTTP port:

{{ filename(body="$CONFDIR/vhosts/reverse_proxy.conf") }}

```nginx
server {
    listen 0.0.0.0;
    server_name nas.enoent.fr;

    access_log /var/log/nginx/nas.access_log;
    error_log /var/log/nginx/nas.error_log info;

    location / {
        proxy_headers_hash_max_size 1024;
        proxy_headers_hash_bucket_size 128;
        proxy_pass http://nas.local:8080;
    }
}
```

The `location /` block here defines a behaviour for all requests matching
`nas.enoent.fr/*`. In our case, that's all of them, as we only have one
`location` block.

Inside of it, we have some settings for our reverse proxy (maximum header
sizes), and the really interesting part: the `proxy_pass` entry, which defines
where incoming requests are forwarded.

### PHP

To allow PHP applications to work, we'll need a PHP interpreter. More
specifically, we'll use [PHP-FPM](http://php-fpm.org/). PHP-FPM is a FastCGI PHP
processor. It's a daemon listening on a socket, waiting for PHP scripts, and
returning the PHP output. The configuration of PHP-FPM is out of this article's
scope, but we'll need to have it running, and note where it can be accessed (a
local Unix socket, or a TCP socket, either remote or local).

We need to define a behaviour for PHP files, telling nginx how to process them:

{{ filename(body="$CONFDIR/php.conf") }}

```nginx
location ~ ^(.+\.php)(.*)$ {
    include fastcgi_params;
    fastcgi_pass unix:/run/php-fpm/php-fpm.sock;
    fastcgi_split_path_info ^(.+\.php)(.*)$;
    fastcgi_param PATH_INFO $fastcgi_path_info;
    fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
}
```

This file specifies how files with a `.php` extension will be processed. Nginx
will split the arguments and filename, and pass them to the PHP-FPM socket,
which here is listening on the Unix socket at `/run/php-fpm/php-fpm.sock`. For a
TCP socket, line 3 would need to be changed to something like this:

{{ filename(body="$CONFDIR/php.conf - TCP socket") }}

```nginx
location ~ ^(.+\.php)(.*)$ {
    include fastcgi_params;
    fastcgi_pass 127.0.0.1:9000;
    fastcgi_split_path_info ^(.+\.php)(.*)$;
    fastcgi_param PATH_INFO $fastcgi_path_info;
    fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
}
```

Next, to define a vhost hosting some PHP scripts, we simply need to include this
file:

{{ filename(body="$CONFDIR/vhosts/php.conf") }}

```nginx
server {
    listen 0.0.0.0;
    server_name my-awesome-php-app.enoent.fr;

    access_log /var/log/nginx/my-awesome-php-app.access_log;
    error_log /var/log/nginx/my-awesome-php-app.error_log info;

    root /srv/http/localhost;
    include php.conf;
}
```

### Rack

Rack-based applications need [Passenger](https://www.phusionpassenger.com/) to
work. Passenger is pretty similar to PHP-FPM, but its configuration with nginx
is easier. Note that it needs to be built into nginx.
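
To check whether the nginx binary you installed was actually built with
Passenger, its compile-time options can be inspected (a quick sanity check, not
part of the original setup):

```bash
# nginx -V prints the version and configure arguments on stderr;
# any mention of passenger means the module is compiled in.
nginx -V 2>&1 | grep -i passenger
```
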
To enable it, we need to tweak our `http` block in `$CONFDIR/nginx.conf` to
specify our Passenger root directory and the path to the `ruby` executable:

{{ filename(body="$CONFDIR/nginx.conf") }}

```nginx
worker_processes auto;

events {
    worker_connections 1024;
}

http {
    proxy_send_timeout 600s;
    proxy_read_timeout 600s;
    fastcgi_send_timeout 600s;
    fastcgi_read_timeout 600s;
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 0;
    gzip on;
    index index.html index.htm;
    client_max_body_size 2048m;

    passenger_root /usr/lib/passenger;
    passenger_ruby /usr/bin/ruby;
}
```

Once this is done, to set up a Rack vhost, we just need to enable Passenger on
it, and define which environment we want to use for Rails applications:

{{ filename(body="$CONFDIR/vhosts/rack.conf") }}

```nginx
server {
    listen 0.0.0.0;
    server_name rack-app.enoent.fr;

    access_log /var/log/nginx/rack-app.access_log;
    error_log /var/log/nginx/rack-app.error_log info;

    root /srv/http/rack-app/public;

    passenger_enabled on;
    rails_env production;
}
```

Note that the directory set as `root` must match the `public` directory of your
Rack application.

### Using all of these templates

Once we have written our vhost definition files in `$CONFDIR/vhosts`, enabling
or disabling one is really easy. We just need to include the corresponding file
in the `http` block of our `$CONFDIR/nginx.conf` file:

{{ filename(body="$CONFDIR/nginx.conf") }}

```nginx
worker_processes auto;

events {
    worker_connections 1024;
}

http {
    proxy_send_timeout 600s;
    proxy_read_timeout 600s;
    fastcgi_send_timeout 600s;
    fastcgi_read_timeout 600s;
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 0;
    gzip on;
    index index.html index.htm;
    client_max_body_size 2048m;

    passenger_root /usr/lib/passenger;
    passenger_ruby /usr/bin/ruby;

    include vhosts/static_html.conf;
    include vhosts/reverse_proxy.conf;
    include vhosts/php.conf;
    include vhosts/rack.conf;
}
```

Obviously, if we don't include any Rack vhost, we don't need lines 20 and 21 as
they are Passenger-specific.

We can name our vhost files whatever we like, and create as many as we need.
Having the general configuration split into reusable files makes maintenance
easy. When deploying a new PHP application, we just need to include `php.conf`,
and not think "where is my PHP-FPM listening again?". It just works.

diff --git a/content/posts/ktor-altering-served-content.md b/content/posts/ktor-altering-served-content.md
new file mode 100644
index 0000000..be25b49
--- /dev/null
+++ b/content/posts/ktor-altering-served-content.md
@@ -0,0 +1,146 @@
++++
+template = "article.html"
+title = "Ktor: Altering served content"
+date = 2022-09-06T14:00:00+10:00
+description = "Learn how to create Ktor plugins that transform response content, with a practical example of injecting scripts into HTML files."
+
+[taxonomies]
+tags = ["kotlin", "ktor", "web"]
++++

When serving files with [Ktor](https://ktor.io), there might be times when you need to alter those
files. For example, you might want to inject a script into every served HTML file. For this purpose,
we can leverage [plugins](https://ktor.io/docs/plugins.html).
Plugins can hook at different stages
of the request/response pipeline:

{% mermaid(caption="Ktor request/response pipeline") %}
graph LR
    Request --> Plugin1[Plugin]
    Plugin1 --> Handler
    Handler --> Plugin2[Plugin]
    Plugin2 --> Response
{% end %}

Let's write a plugin that transforms a specific type of file - going with the previous example of
injecting a script into every served HTML file. Our plugin needs to:

- Take a script URL as an input. If not provided, it won't do anything.
- Add that script as a `<script>` tag to every served HTML file.

diff --git a/themes/retro-future/templates/base.html b/themes/retro-future/templates/base.html
new file mode 100644
--- /dev/null
+++ b/themes/retro-future/templates/base.html
 {% endif %}

 {% if config.extra.analytics.service == "plausible" %}

 {% endif %}
+
+ {% block content %}{% endblock content %} +
+
+ + diff --git a/themes/retro-future/templates/index.html b/themes/retro-future/templates/index.html new file mode 100644 index 0000000..607be1b --- /dev/null +++ b/themes/retro-future/templates/index.html @@ -0,0 +1,37 @@ +{% extends "base.html" %} +{% import "macros.html" as macros %} + +{% block content %} +

Blog

+ +{% if section.description %} +

{{ section.description }}

+{% endif %} + +
    + {% if paginator %} + {% for page in paginator.pages %} + {{ macros::post_list_item(page=page) }} + {% endfor %} + {% else %} + {% set posts_section = get_section(path="posts/_index.md") %} + {% for page in posts_section.pages %} + {{ macros::post_list_item(page=page) }} + {% endfor %} + {% endif %} +
+ +{% if paginator %} + +{% endif %} +{% endblock content %} diff --git a/themes/retro-future/templates/macros.html b/themes/retro-future/templates/macros.html new file mode 100644 index 0000000..20efe19 --- /dev/null +++ b/themes/retro-future/templates/macros.html @@ -0,0 +1,59 @@ +{% macro post_list_item(page, show_tags=true, show_series=true) %} +
  • +

    + {{ page.title }} +

    + {% if show_series %} + {% set path_parts = page.relative_path | split(pat="/") %} + {% if path_parts | length > 2 %} + {% set parent_dir = path_parts | slice(end=-2) | join(sep="/") %} + {% set parent_section = get_section(path=parent_dir ~ "/_index.md") %} + {% if parent_section and parent_section.extra.series %} +

    Part of the series: {{ parent_section.title }}

    + {% endif %} + {% endif %} + {% endif %} + +
    + + + {% if page.reading_time %} + + {{ page.reading_time }} min read + {% endif %} + + {% if show_tags and page.taxonomies.tags %} + +
      + {% for tag in page.taxonomies.tags %} +
    • {{ tag }}
    • + {% endfor %} +
    + {% endif %} +
    + + {% if page.description %} +

    {{ page.description }}

    + {% endif %} +
  • +{% endmacro %} + +{% macro post_meta(page, show_tags=true) %} + +{% endmacro %} diff --git a/themes/retro-future/templates/page.html b/themes/retro-future/templates/page.html new file mode 100644 index 0000000..9e5b841 --- /dev/null +++ b/themes/retro-future/templates/page.html @@ -0,0 +1,13 @@ +{% extends "base.html" %} + +{% block title %}{{ page.title }} · {{ config.title }}{% endblock title %} + +{% block description %}{{ page.description | default(value=page.summary) | default(value=config.description) }}{% endblock description %} + +{% block content %} +

    {{ page.title }}

    + +
    + {{ page.content | safe }} +
    +{% endblock content %} diff --git a/themes/retro-future/templates/section.html b/themes/retro-future/templates/section.html new file mode 100644 index 0000000..cf8ccf7 --- /dev/null +++ b/themes/retro-future/templates/section.html @@ -0,0 +1,37 @@ +{% extends "base.html" %} + +{% block title %}{{ section.title }} · {{ config.title }}{% endblock title %} + +{% block content %} +

    {{ section.title }}

    + +{% if section.content %} +
    + {{ section.content | safe }} +
    +{% endif %} + +{% if section.pages %} +
      + {% for page in section.pages %} +
    • +

      + {{ page.title }} +

      + {% set path_parts = page.relative_path | split(pat="/") %} + {% if path_parts | length > 2 %} + {% set parent_dir = path_parts | slice(end=-2) | join(sep="/") %} + {% set parent_section = get_section(path=parent_dir ~ "/_index.md") %} + {% if parent_section and parent_section.extra.series %} +

      Part of the series: {{ parent_section.title }}

      + {% endif %} + {% endif %} + + {% if page.description %} +

      {{ page.description }}

      + {% endif %} +
    • + {% endfor %} +
    +{% endif %} +{% endblock content %} diff --git a/themes/retro-future/templates/series.html b/themes/retro-future/templates/series.html new file mode 100644 index 0000000..6aef9ce --- /dev/null +++ b/themes/retro-future/templates/series.html @@ -0,0 +1,28 @@ +{% extends "base.html" %} +{% import "macros.html" as macros %} + +{% block title %}{{ section.title }} · {{ config.title }}{% endblock title %} + +{% block description %}{{ section.description | default(value=config.description) }}{% endblock description %} + +{% block content %} +

    {{ section.title }}

    + +{% if section.description %} +

    {{ section.description }}

    +{% endif %} + +{% if section.content %} +
    + {{ section.content | safe }} +
    +{% endif %} + + +{% endblock content %} diff --git a/themes/retro-future/templates/shortcodes/aside.html b/themes/retro-future/templates/shortcodes/aside.html new file mode 100644 index 0000000..183c342 --- /dev/null +++ b/themes/retro-future/templates/shortcodes/aside.html @@ -0,0 +1,6 @@ + diff --git a/themes/retro-future/templates/shortcodes/filename.html b/themes/retro-future/templates/shortcodes/filename.html new file mode 100644 index 0000000..80cdbcd --- /dev/null +++ b/themes/retro-future/templates/shortcodes/filename.html @@ -0,0 +1 @@ +
    {{ body }}
    diff --git a/themes/retro-future/templates/shortcodes/mermaid.html b/themes/retro-future/templates/shortcodes/mermaid.html new file mode 100644 index 0000000..4ac4518 --- /dev/null +++ b/themes/retro-future/templates/shortcodes/mermaid.html @@ -0,0 +1,4 @@ +
    +
    {{ body }}
    + {% if caption %}
    {{ caption }}
    {% endif %} +
    diff --git a/themes/retro-future/templates/taxonomy_list.html b/themes/retro-future/templates/taxonomy_list.html new file mode 100644 index 0000000..709dff2 --- /dev/null +++ b/themes/retro-future/templates/taxonomy_list.html @@ -0,0 +1,16 @@ +{% extends "base.html" %} + +{% block title %}{{ taxonomy.name | title }} · {{ config.title }}{% endblock title %} + +{% block content %} +

    {{ taxonomy.name | title }}

    + +
      + {% for term in terms %} +
    • + {{ term.name }} + ({{ term.pages | length }}) +
    • + {% endfor %} +
    +{% endblock content %} diff --git a/themes/retro-future/templates/taxonomy_single.html b/themes/retro-future/templates/taxonomy_single.html new file mode 100644 index 0000000..1d692fc --- /dev/null +++ b/themes/retro-future/templates/taxonomy_single.html @@ -0,0 +1,14 @@ +{% extends "base.html" %} +{% import "macros.html" as macros %} + +{% block title %}{{ term.name }} · {{ taxonomy.name | title }} · {{ config.title }}{% endblock title %} + +{% block content %} +

    {{ taxonomy.name | title }}: {{ term.name }}

    + +
      + {% for page in term.pages %} + {{ macros::post_list_item(page=page, show_tags=false) }} + {% endfor %} +
    +{% endblock content %} diff --git a/themes/retro-future/theme.toml b/themes/retro-future/theme.toml new file mode 100644 index 0000000..cb14528 --- /dev/null +++ b/themes/retro-future/theme.toml @@ -0,0 +1,5 @@ +name = "retro-future" +description = "A modern, accessible theme with clean typography and thoughtful color palette" +license = "MIT" +homepage = "https://enoent.fr" +min_version = "0.21.0"