是否可以结合 NDK 和 TCC 进行 Android 应用程序开发?
是否可以在 tcc(微型 C 编译器)的帮助下创建 Android 应用程序?
对我来说,普通的 NDK APK 看起来很简单:/meta-inf 文件夹下只有一些签名文件,/lib 下是动态 *.so 库,另外还有 AndroidManifest.xml 和 resources.arsc。
├───lib
│ └───x86
│ └───gdbserver
│ └───libAndroid1.so
├───meta-inf
│ └───CERT.RSA
│ └───CERT.SF
│ └───MANIFEST.MF
├───AndroidManifest.xml
├───resources.arsc
而且,似乎 gcc/clang 编译器可以被 tcc 替换。我设法用命令编译了一个通用的 *.so 文件:
tcc -r dllmain.c -o dllmain.o
tcc -shared dllmain.o -o dll.so
但现在我不确定,我将如何通过用 tcc 替换 gcc/clang 来编译一个完整的 ndk 项目。
这是我失败的尝试:
C:\Users\gray\source\repos\Android1\Android1\Android1.NativeActivity>tcc -shared -r main.c -o libAndroid1.so -IC:\Microsoft\AndroidNDK\android-ndk-r15c\platforms\android-19\arch-x86\usr\include\
tcc: warning: -r: overriding compiler action already specified
In file included from main.c:24:
In file included from C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//android/sensor.h:43:
In file included from C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//sys/types.h:35:
C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//sys/cdefs.h:283: error: #error "No function renaming possible"
此外,我将 main.cpp
重命名为 main.c
以使其与 tcc 一起使用。
main.c:
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
// Logging helpers wrapping __android_log_print with this app's log tag.
#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "AndroidProject1.NativeActivity", __VA_ARGS__))
// FIX: the tag argument was missing from LOGW, so the first format argument
// was consumed as the log tag at every call site.
#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "AndroidProject1.NativeActivity", __VA_ARGS__))
/**
* Our saved state data.
*/
#include <android/sensor.h>
struct saved_state {
float angle; // animation phase for the clear color, cycles within [0, 1]
int x; // last touch x position, in screen pixels
int y; // last touch y position, in screen pixels
};
/**
* Shared state for our app.
*/
struct engine {
struct android_apP* app;
ASensorManager* sensorManager;
const ASensor* accelerometerSensor;
ASensorEventQueue* sensorEventQueue;
int animating;
EGLdisplay display;
EGLSurface surface;
EGLContext context;
int width;
int height;
struct saved_state state;
};
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(struct engine* engine) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below,we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_SURFACE_TYPE,EGL_WINDOW_BIT,EGL_BLUE_SIZE,8,EGL_GREEN_SIZE,EGL_RED_SIZE,EGL_NONE
};
EGLint w,h,format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLdisplay display = eglGetdisplay(EGL_DEFAULT_disPLAY);
eglInitialize(display,0);
/* Here,the application chooses the configuration it desires. In this
* sample,we have a very simplified selection process,where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display,attribs,&config,1,&numConfigs);
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig,we can safely reconfigure the
* ANativeWindow buffers to match,using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display,config,EGL_NATIVE_VISUAL_ID,&format);
ANativeWindow_setBuffersGeometry(engine->app->window,format);
surface = eglCreateWindowSurface(display,engine->app->window,NULL);
context = eglCreateContext(display,NULL,NULL);
if (eglMakeCurrent(display,surface,context) == EGL_FALSE) {
LOGW("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display,EGL_WIDTH,&w);
eglQuerySurface(display,EGL_HEIGHT,&h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
// Initialize GL state.
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_FASTEST);
glEnable(GL_CULL_FACE);
glShadeModel(GL_SMOOTH);
gldisable(GL_DEPTH_TEST);
return 0;
}
/**
* Just the current frame in the display.
*/
/**
 * Draw the current frame: fill the screen with a color derived from the
 * last touch position and the animation angle, then present the surface.
 */
static void engine_draw_frame(struct engine* engine) {
    if (engine->display == NULL) {
        return; // No display yet — nothing to draw to.
    }

    // Derive the clear color from the engine state.
    float red = ((float)engine->state.x) / engine->width;
    float green = engine->state.angle;
    float blue = ((float)engine->state.y) / engine->height;

    glClearColor(red, green, blue, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    eglSwapBuffers(engine->display, engine->surface);
}
/**
* Tear down the EGL context currently associated with the display.
*/
static void engine_term_display(struct engine* engine) {
if (engine->display != EGL_NO_disPLAY) {
eglMakeCurrent(engine->display,EGL_NO_SURFACE,EGL_NO_CONTEXT);
if (engine->context != EGL_NO_CONTEXT) {
eglDestroyContext(engine->display,engine->context);
}
if (engine->surface != EGL_NO_SURFACE) {
eglDestroySurface(engine->display,engine->surface);
}
eglTerminate(engine->display);
}
engine->animating = 0;
engine->display = EGL_NO_disPLAY;
engine->context = EGL_NO_CONTEXT;
engine->surface = EGL_NO_SURFACE;
}
/**
* Process the next input event.
*/
/**
 * Process the next input event.
 * Returns 1 if the event was handled (motion events update the saved
 * touch position), 0 otherwise.
 * FIX: "struct android_apP" -> "struct android_app" (case-garbled type).
 */
static int engine_handle_input(struct android_app* app, AInputEvent* event) {
    struct engine* engine = (struct engine*)app->userData;
    if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) {
        engine->state.x = AMotionEvent_getX(event, 0);
        engine->state.y = AMotionEvent_getY(event, 0);
        return 1;
    }
    return 0;
}
/**
* Process the next main command.
*/
static void engine_handle_cmd(struct android_apP* app,int cmd) {
struct engine* engine = (struct engine*)app->userData;
switch (cmd) {
case APP_CMD_SAVE_STATE:
// The system has asked us to save our current state. Do so.
engine->app->savedState = malloc(sizeof(struct saved_state));
*((struct saved_state*)engine->app->savedState) = engine->state;
engine->app->savedStateSize = sizeof(struct saved_state);
break;
case APP_CMD_INIT_WINDOW:
// The window is being shown,get it ready.
if (engine->app->window != NULL) {
engine_init_display(engine);
engine_draw_frame(engine);
}
break;
case APP_CMD_TERM_WINDOW:
// The window is being hidden or closed,clean it up.
engine_term_display(engine);
break;
case APP_CMD_GAINED_FOCUS:
// When our app gains focus,we start monitoring the accelerometer.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_enableSensor(engine->sensorEventQueue,engine->accelerometerSensor);
// We'd like to get 60 events per second (in us).
ASensorEventQueue_setEventRate(engine->sensorEventQueue,engine->accelerometerSensor,(1000L / 60) * 1000);
}
break;
case APP_CMD_LOST_FOCUS:
// When our app loses focus,we stop monitoring the accelerometer.
// This is to avoid consuming battery while not being used.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_disableSensor(engine->sensorEventQueue,engine->accelerometerSensor);
}
// Also stop animating.
engine->animating = 0;
engine_draw_frame(engine);
break;
}
}
/**
* This is the main entry point of a native application that is using
* android_native_app_glue. It runs in its own thread,with its own
* event loop for receiving input events and doing other things.
*/
void android_main(struct android_apP* state) {
struct engine engine;
memset(&engine,sizeof(engine));
state->userData = &engine;
state->onAppCmd = engine_handle_cmd;
state->onInputEvent = engine_handle_input;
engine.app = state;
// Prepare to monitor accelerometer
engine.sensorManager = ASensorManager_getInstance();
engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,ASENSOR_TYPE_ACCELEROMETER);
engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,state->looper,LOOPER_ID_USER,NULL);
if (state->savedState != NULL) {
// We are starting with a prevIoUs saved state; restore from it.
engine.state = *(struct saved_state*)state->savedState;
}
engine.animating = 1;
// loop waiting for stuff to do.
while (1) {
// Read all pending events.
int ident;
int events;
struct android_poll_source* source;
// If not animating,we will block forever waiting for events.
// If animating,we loop until all events are read,then continue
// to draw the next frame of animation.
while ((ident = ALooper_pollAll(engine.animating ? 0 : -1,&events,(void**)&source)) >= 0) {
// Process this event.
if (source != NULL) {
source->process(state,source);
}
// If a sensor has data,process it Now.
if (ident == LOOPER_ID_USER) {
if (engine.accelerometerSensor != NULL) {
ASensorEvent event;
while (ASensorEventQueue_getEvents(engine.sensorEventQueue,&event,1) > 0) {
LOGI("accelerometer: x=%f y=%f z=%f",event.acceleration.x,event.acceleration.y,event.acceleration.z);
}
}
}
// Check if we are exiting.
if (state->destroyRequested != 0) {
engine_term_display(&engine);
return;
}
}
if (engine.animating) {
// Done with events; draw next animation frame.
engine.state.angle += .01f;
if (engine.state.angle > 1) {
engine.state.angle = 0;
}
// Drawing is throttled to the screen update rate,so there
// is no need to do timing here.
engine_draw_frame(&engine);
}
}
}
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。