Skip to content

GGML flags fix

GGML flags fix #27

Workflow file for this run

# CI workflow: cross-compiles llama.cpp for Windows across a generated flag matrix.
# Runs on every push and on a 12-hour schedule.
name: Build LLAMA.CPP

on:
  push:
  schedule:
    - cron: "0 */12 * * *"

jobs:
  # Job 1: compute the Windows build matrix from win32.cjs in this repo.
  # The script's return value (a JSON matrix with `flags` and `suffix` entries,
  # judging by how the build job consumes it) is exposed via the step output.
  windows-matrix:
    name: Generate Windows Matrix
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.matrices.outputs.result }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: Generate Matrices
        id: matrices
        uses: actions/github-script@v8
        with:
          script: |
            const out = require("./win32.cjs");
            return out();

  # -D CMAKE_BUILD_TYPE=Release
  # Job 2: build llama.cpp once per matrix entry and upload the binaries.
  windows:
    name: Build for Windows
    needs: [windows-matrix]
    runs-on: windows-latest
    strategy:
      # NOTE(review): fail-fast cancels all remaining matrix builds on the
      # first failure — confirm that is intended for a flag-exploration matrix.
      fail-fast: true
      matrix:
        include: ${{ fromJson(needs.windows-matrix.outputs.matrix) }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: Clone llama.cpp
        # NOTE(review): clones the upstream default branch unpinned, so runs
        # are not reproducible — consider pinning a tag/SHA. TODO confirm.
        run: git clone https://github.com/ggml-org/llama.cpp.git
      # Fixed step label: this step builds llama.cpp (the original said
      # "Build Ollama", a copy-paste leftover).
      - name: Build llama.cpp
        shell: pwsh
        working-directory: llama.cpp
        run: |
          cmake -B build -DLLAMA_CURL=OFF -D GGML_NATIVE=OFF ${{ matrix.flags }}
          ../activex.ps1
          Compress-Archive -Path ${{ github.workspace }}\llama.cpp\build\bin\Release\* -DestinationPath ${{ github.workspace }}\llama.zip
        # NOTE(review): the cmake call above only *configures*; no
        # `cmake --build build --config Release` is visible here, so
        # ../activex.ps1 presumably performs the actual build into
        # build\bin\Release — verify the script's contents.
      - name: Upload
        uses: actions/upload-artifact@v5
        with:
          name: llama-cpp-windows-x64-${{ matrix.suffix }}
          path: |
            ./llama.zip
      # - name: Upload Binaries
      #   uses: AButler/[email protected]
      #   with:
      #     files: |
      #       ./llama.cpp/llama.zip
      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
      #     release-id: ${{ needs.create.outputs.release_id }}